python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"
struct enic_stat {
char name[ETH_GSTRING_LEN];
unsigned int index;
};
#define ENIC_TX_STAT(stat) { \
.name = #stat, \
.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}
#define ENIC_RX_STAT(stat) { \
.name = #stat, \
.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}
#define ENIC_GEN_STAT(stat) { \
.name = #stat, \
.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
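/* For reference (illustration only, not used by the driver): each macro
 * records a stat's position as a u64 slot index into the corresponding
 * vnic_*_stats struct, e.g.
 *
 *	ENIC_TX_STAT(tx_frames_ok)
 *	==> { .name  = "tx_frames_ok",
 *	      .index = offsetof(struct vnic_tx_stats, tx_frames_ok) / sizeof(u64) }
 *
 * enic_get_ethtool_stats() later uses .index to pick the value out of the
 * firmware stats block treated as a flat array of u64s.
 */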
static const struct enic_stat enic_tx_stats[] = {
ENIC_TX_STAT(tx_frames_ok),
ENIC_TX_STAT(tx_unicast_frames_ok),
ENIC_TX_STAT(tx_multicast_frames_ok),
ENIC_TX_STAT(tx_broadcast_frames_ok),
ENIC_TX_STAT(tx_bytes_ok),
ENIC_TX_STAT(tx_unicast_bytes_ok),
ENIC_TX_STAT(tx_multicast_bytes_ok),
ENIC_TX_STAT(tx_broadcast_bytes_ok),
ENIC_TX_STAT(tx_drops),
ENIC_TX_STAT(tx_errors),
ENIC_TX_STAT(tx_tso),
};
static const struct enic_stat enic_rx_stats[] = {
ENIC_RX_STAT(rx_frames_ok),
ENIC_RX_STAT(rx_frames_total),
ENIC_RX_STAT(rx_unicast_frames_ok),
ENIC_RX_STAT(rx_multicast_frames_ok),
ENIC_RX_STAT(rx_broadcast_frames_ok),
ENIC_RX_STAT(rx_bytes_ok),
ENIC_RX_STAT(rx_unicast_bytes_ok),
ENIC_RX_STAT(rx_multicast_bytes_ok),
ENIC_RX_STAT(rx_broadcast_bytes_ok),
ENIC_RX_STAT(rx_drop),
ENIC_RX_STAT(rx_no_bufs),
ENIC_RX_STAT(rx_errors),
ENIC_RX_STAT(rx_rss),
ENIC_RX_STAT(rx_crc_errors),
ENIC_RX_STAT(rx_frames_64),
ENIC_RX_STAT(rx_frames_127),
ENIC_RX_STAT(rx_frames_255),
ENIC_RX_STAT(rx_frames_511),
ENIC_RX_STAT(rx_frames_1023),
ENIC_RX_STAT(rx_frames_1518),
ENIC_RX_STAT(rx_frames_to_max),
};
static const struct enic_stat enic_gen_stats[] = {
ENIC_GEN_STAT(dma_map_error),
};
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
int i;
int intr;
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
}
}
static int enic_get_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
struct enic *enic = netdev_priv(netdev);
struct ethtool_link_settings *base = &ecmd->base;
ethtool_link_ksettings_add_link_mode(ecmd, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(ecmd, advertising,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
base->port = PORT_FIBRE;
if (netif_carrier_ok(netdev)) {
base->speed = vnic_dev_port_speed(enic->vdev);
base->duplex = DUPLEX_FULL;
} else {
base->speed = SPEED_UNKNOWN;
base->duplex = DUPLEX_UNKNOWN;
}
base->autoneg = AUTONEG_DISABLE;
return 0;
}
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_devcmd_fw_info *fw_info;
int err;
err = enic_dev_fw_info(enic, &fw_info);
/* return only when dma_alloc_coherent fails in vnic_dev_fw_info
* For other failures, like devcmd failure, we return previously
* recorded info.
*/
if (err == -ENOMEM)
return;
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
static void enic_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < enic_n_tx_stats; i++) {
memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < enic_n_rx_stats; i++) {
memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
for (i = 0; i < enic_n_gen_stats; i++) {
memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
break;
}
}
static void enic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
ring->rx_pending = c->rq_desc_count;
ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
ring->tx_pending = c->wq_desc_count;
}
static int enic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
int running = netif_running(netdev);
unsigned int rx_pending;
unsigned int tx_pending;
int err = 0;
if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
netdev_info(netdev,
"modifying mini ring params is not supported");
return -EINVAL;
}
if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
netdev_info(netdev,
"modifying jumbo ring params is not supported");
return -EINVAL;
}
rx_pending = c->rq_desc_count;
tx_pending = c->wq_desc_count;
if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
ring->rx_pending < ENIC_MIN_RQ_DESCS) {
netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
ring->rx_pending, ENIC_MIN_RQ_DESCS,
ENIC_MAX_RQ_DESCS);
return -EINVAL;
}
if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
ring->tx_pending < ENIC_MIN_WQ_DESCS) {
netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
ring->tx_pending, ENIC_MIN_WQ_DESCS,
ENIC_MAX_WQ_DESCS);
return -EINVAL;
}
if (running)
dev_close(netdev);
c->rq_desc_count =
ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
c->wq_desc_count =
ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
enic_free_vnic_resources(enic);
err = enic_alloc_vnic_resources(enic);
if (err) {
netdev_err(netdev,
"Failed to alloc vNIC resources, aborting\n");
enic_free_vnic_resources(enic);
goto err_out;
}
enic_init_vnic_resources(enic);
if (running) {
err = dev_open(netdev, NULL);
if (err)
goto err_out;
}
return 0;
err_out:
c->rq_desc_count = rx_pending;
c->wq_desc_count = tx_pending;
return err;
}
static int enic_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
default:
return -EOPNOTSUPP;
}
}
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_stats *vstats;
unsigned int i;
int err;
err = enic_dev_stats_dump(enic, &vstats);
/* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
if (err == -ENOMEM)
return;
for (i = 0; i < enic_n_tx_stats; i++)
*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
for (i = 0; i < enic_n_rx_stats; i++)
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
for (i = 0; i < enic_n_gen_stats; i++)
*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
static u32 enic_get_msglevel(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
return enic->msg_enable;
}
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
struct enic *enic = netdev_priv(netdev);
enic->msg_enable = value;
}
static int enic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
if (rxcoal->use_adaptive_rx_coalesce)
ecmd->use_adaptive_rx_coalesce = 1;
ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
return 0;
}
static int enic_coalesce_valid(struct enic *enic,
struct ethtool_coalesce *ec)
{
u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
ec->rx_coalesce_usecs_high);
u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
ec->rx_coalesce_usecs_low);
if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
ec->tx_coalesce_usecs)
return -EINVAL;
if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
(ec->rx_coalesce_usecs > coalesce_usecs_max) ||
(ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
(ec->rx_coalesce_usecs_high > coalesce_usecs_max))
netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
coalesce_usecs_max);
if (ec->rx_coalesce_usecs_high &&
(rx_coalesce_usecs_high <
rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
return -EINVAL;
return 0;
}
static int enic_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
u32 rx_coalesce_usecs_low;
u32 rx_coalesce_usecs_high;
u32 coalesce_usecs_max;
unsigned int i, intr;
int ret;
struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
ret = enic_coalesce_valid(enic, ecmd);
if (ret)
return ret;
coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
coalesce_usecs_max);
rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
coalesce_usecs_max);
rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
coalesce_usecs_max);
rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
coalesce_usecs_max);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
vnic_intr_coalescing_timer_set(&enic->intr[intr],
tx_coalesce_usecs);
}
enic->tx_coalesce_usecs = tx_coalesce_usecs;
}
rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
if (!rxcoal->use_adaptive_rx_coalesce)
enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
if (ecmd->rx_coalesce_usecs_high) {
rxcoal->range_end = rx_coalesce_usecs_high;
rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
ENIC_AIC_LARGE_PKT_DIFF;
}
enic->rx_coalesce_usecs = rx_coalesce_usecs;
return 0;
}
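/* For reference: the adaptive RX coalescing ranges set above map onto the
 * ethtool parameters as
 *
 *	small_pkt_range_start = rx_coalesce_usecs_low
 *	large_pkt_range_start = rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF
 *	range_end             = rx_coalesce_usecs_high
 *
 * which is why enic_coalesce_valid() rejects a non-zero rx_coalesce_usecs_high
 * that is smaller than rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF.
 */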
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int j, ret = 0, cnt = 0;
cmd->data = enic->rfs_h.max - enic->rfs_h.free;
for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
struct enic_rfs_fltr_node *n;
hhead = &enic->rfs_h.ht_head[j];
hlist_for_each_entry_safe(n, tmp, hhead, node) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
rule_locs[cnt] = n->fltr_id;
cnt++;
}
}
cmd->rule_cnt = cnt;
return ret;
}
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct enic_rfs_fltr_node *n;
n = htbl_fltr_search(enic, (u16)fsp->location);
if (!n)
return -EINVAL;
switch (n->keys.basic.ip_proto) {
case IPPROTO_TCP:
fsp->flow_type = TCP_V4_FLOW;
break;
case IPPROTO_UDP:
fsp->flow_type = UDP_V4_FLOW;
break;
default:
return -EINVAL;
}
fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
fsp->ring_cookie = n->rq_id;
return 0;
}
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
u8 rss_hash_type = 0;
cmd->data = 0;
spin_lock_bh(&enic->devcmd_lock);
(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
spin_unlock_bh(&enic->devcmd_lock);
switch (cmd->flow_type) {
case TCP_V6_FLOW:
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV4_FLOW:
case IPV6_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
return 0;
}
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct enic *enic = netdev_priv(dev);
int ret = 0;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = enic->rq_count;
break;
case ETHTOOL_GRXCLSRLCNT:
spin_lock_bh(&enic->rfs_h.lock);
cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
cmd->data = enic->rfs_h.max;
spin_unlock_bh(&enic->rfs_h.lock);
break;
case ETHTOOL_GRXCLSRLALL:
spin_lock_bh(&enic->rfs_h.lock);
ret = enic_grxclsrlall(enic, cmd, rule_locs);
spin_unlock_bh(&enic->rfs_h.lock);
break;
case ETHTOOL_GRXCLSRULE:
spin_lock_bh(&enic->rfs_h.lock);
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
case ETHTOOL_GRXFH:
ret = enic_get_rx_flow_hash(enic, cmd);
break;
default:
ret = -EOPNOTSUPP;
break;
}
return ret;
}
static int enic_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna, void *data)
{
struct enic *enic = netdev_priv(dev);
int ret = 0;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = enic->rx_copybreak;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int enic_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct enic *enic = netdev_priv(dev);
int ret = 0;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
enic->rx_copybreak = *(u32 *)data;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
return ENIC_RSS_LEN;
}
static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
u8 *hfunc)
{
struct enic *enic = netdev_priv(netdev);
if (hkey)
memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
return 0;
}
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *hkey, const u8 hfunc)
{
struct enic *enic = netdev_priv(netdev);
if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
indir)
return -EINVAL;
if (hkey)
memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
return __enic_set_rsskey(enic);
}
static int enic_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
static const struct ethtool_ops enic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_RX_USECS_LOW |
ETHTOOL_COALESCE_RX_USECS_HIGH,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
.get_link = ethtool_op_get_link,
.get_strings = enic_get_strings,
.get_ringparam = enic_get_ringparam,
.set_ringparam = enic_set_ringparam,
.get_sset_count = enic_get_sset_count,
.get_ethtool_stats = enic_get_ethtool_stats,
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
.get_rxnfc = enic_get_rxnfc,
.get_tunable = enic_get_tunable,
.set_tunable = enic_set_tunable,
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
};
void enic_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &enic_ethtool_ops;
}
| linux-master | drivers/net/ethernet/cisco/enic/enic_ethtool.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_stats.h"
#include "enic.h"
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
return vdev->priv;
}
static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar, unsigned int num_bars)
{
struct vnic_resource_header __iomem *rh;
struct mgmt_barmap_hdr __iomem *mrh;
struct vnic_resource __iomem *r;
u8 type;
if (num_bars == 0)
return -EINVAL;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
mrh = bar->vaddr;
if (!rh) {
vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
/* Check for mgmt vnic in addition to normal vnic */
if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
(ioread32(&rh->version) != VNIC_RES_VERSION)) {
if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
}
if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
r = (struct vnic_resource __iomem *)(mrh + 1);
else
r = (struct vnic_resource __iomem *)(rh + 1);
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
u8 bar_num = ioread8(&r->bar);
u32 bar_offset = ioread32(&r->bar_offset);
u32 count = ioread32(&r->count);
u32 len;
r++;
if (bar_num >= num_bars)
continue;
if (!bar[bar_num].len || !bar[bar_num].vaddr)
continue;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
/* each count is stride bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar[bar_num].len) {
vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
type, bar_offset, len,
bar[bar_num].len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
case RES_TYPE_DEVCMD2:
len = count;
break;
default:
continue;
}
vdev->res[type].count = count;
vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
bar_offset;
vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
}
return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type)
{
return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
{
if (!vdev->res[type].vaddr)
return NULL;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
return (char __iomem *)vdev->res[type].vaddr +
index * VNIC_RES_STRIDE;
default:
return (char __iomem *)vdev->res[type].vaddr;
}
}
EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
/* The base address of the desc rings must be 512 byte aligned.
* Descriptor count is aligned to groups of 32 descriptors. A
* count of 0 means the maximum 4096 descriptors. Descriptor
* size is aligned to 16 bytes.
*/
unsigned int count_align = 32;
unsigned int desc_align = 16;
ring->base_align = 512;
if (desc_count == 0)
desc_count = 4096;
ring->desc_count = ALIGN(desc_count, count_align);
ring->desc_size = ALIGN(desc_size, desc_align);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
return ring->size_unaligned;
}
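/* Worked example (illustrative values): desc_count = 3000, desc_size = 16
 *	desc_count     -> ALIGN(3000, 32) = 3008
 *	desc_size      -> ALIGN(16, 16)   = 16
 *	size           -> 3008 * 16       = 48128 bytes
 *	size_unaligned -> 48128 + 512     = 48640 bytes
 * The extra base_align bytes leave room to round the DMA base address up to
 * the required 512-byte boundary in vnic_dev_alloc_desc_ring().
 */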
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring->size_unaligned,
&ring->base_addr_unaligned,
GFP_KERNEL);
if (!ring->descs_unaligned) {
vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring->base_align);
ring->descs = (u8 *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
vnic_dev_clear_desc_ring(ring);
ring->desc_avail = ring->desc_count - 1;
return 0;
}
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
ring->descs = NULL;
}
}
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
unsigned int i;
int delay;
u32 status;
int err;
status = ioread32(&devcmd->status);
if (status == 0xFFFFFFFF) {
/* PCI-e target device is gone */
return -ENODEV;
}
if (status & STAT_BUSY) {
vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
writeq(vdev->args[i], &devcmd->args[i]);
wmb();
}
iowrite32(cmd, &devcmd->cmd);
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
return 0;
for (delay = 0; delay < wait; delay++) {
udelay(100);
status = ioread32(&devcmd->status);
if (status == 0xFFFFFFFF) {
/* PCI-e target device is gone */
return -ENODEV;
}
if (!(status & STAT_BUSY)) {
if (status & STAT_ERROR) {
err = (int)readq(&devcmd->args[0]);
if (err == ERR_EINVAL &&
cmd == CMD_CAPABILITY)
return -err;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
vdev_neterr(vdev, "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
rmb();
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
vdev->args[i] = readq(&devcmd->args[i]);
}
return 0;
}
}
vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
struct devcmd2_result *result;
u8 color;
unsigned int i;
int delay, err;
u32 fetch_index, new_posted;
u32 posted = dc2c->posted;
fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF)
return -ENODEV;
new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
if (new_posted == fetch_index) {
vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
_CMD_N(cmd), fetch_index, posted);
return -EBUSY;
}
dc2c->cmd_ring[posted].cmd = cmd;
dc2c->cmd_ring[posted].flags = 0;
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
dc2c->cmd_ring[posted].args[i] = vdev->args[i];
/* Adding write memory barrier prevents compiler and/or CPU reordering,
* thus avoiding descriptor posting before descriptor is initialized.
* Otherwise, hardware can read stale descriptor fields.
*/
wmb();
iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
dc2c->posted = new_posted;
if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
return 0;
result = dc2c->result + dc2c->next_result;
color = dc2c->color;
dc2c->next_result++;
if (dc2c->next_result == dc2c->result_size) {
dc2c->next_result = 0;
dc2c->color = dc2c->color ? 0 : 1;
}
for (delay = 0; delay < wait; delay++) {
if (result->color == color) {
if (result->error) {
err = result->error;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
vdev_neterr(vdev, "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ)
for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
vdev->args[i] = result->results[i];
return 0;
}
udelay(100);
}
vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
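/* Note on the devcmd2 completion scheme: each devcmd2_result slot carries a
 * color bit written by the device along with the result.  The driver keeps
 * its expected color in dc2c->color and flips it every time next_result wraps
 * around the result ring, so "result->color == color" above indicates a fresh
 * completion rather than a stale entry from the previous pass.
 */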
static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
return -ENODEV;
vdev->devcmd_rtn = _vnic_dev_cmd;
return 0;
}
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
if (vdev->devcmd2)
return 0;
vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
if (!vdev->devcmd2)
return -ENOMEM;
vdev->devcmd2->color = 1;
vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
DEVCMD2_DESC_SIZE);
if (err)
goto err_free_devcmd2;
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
err = -ENODEV;
goto err_free_wq;
}
enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
0);
vdev->devcmd2->posted = fetch_index;
vnic_wq_enable(&vdev->devcmd2->wq);
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
goto err_disable_wq;
vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
VNIC_PADDR_TARGET;
vdev->args[1] = DEVCMD2_RING_SIZE;
err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
if (err)
goto err_free_desc_ring;
vdev->devcmd_rtn = _vnic_dev_cmd2;
return 0;
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
vdev->devcmd2 = NULL;
return err;
}
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
vnic_wq_free(&vdev->devcmd2->wq);
kfree(vdev->devcmd2);
}
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
u32 status;
int err;
memset(vdev->args, 0, sizeof(vdev->args));
vdev->args[0] = vdev->proxy_index;
vdev->args[1] = cmd;
vdev->args[2] = *a0;
vdev->args[3] = *a1;
err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
if (err)
return err;
status = (u32)vdev->args[0];
if (status & STAT_ERROR) {
err = (int)vdev->args[1];
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
err, _CMD_N(cmd));
return err;
}
*a0 = vdev->args[1];
*a1 = vdev->args[2];
return 0;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
vdev->args[0] = *a0;
vdev->args[1] = *a1;
err = vdev->devcmd_rtn(vdev, cmd, wait);
*a0 = vdev->args[0];
*a1 = vdev->args[1];
return err;
}
void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
vdev->proxy = PROXY_BY_INDEX;
vdev->proxy_index = index;
}
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
vdev->proxy = PROXY_NONE;
vdev->proxy_index = 0;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
memset(vdev->args, 0, sizeof(vdev->args));
switch (vdev->proxy) {
case PROXY_BY_INDEX:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
a0, a1, wait);
case PROXY_BY_BDF:
return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
a0, a1, wait);
case PROXY_NONE:
default:
return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
}
}
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
return !(err || a0);
}
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
if (!vdev->fw_info) {
vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
&vdev->fw_info_pa, GFP_ATOMIC);
if (!vdev->fw_info)
return -ENOMEM;
a0 = vdev->fw_info_pa;
a1 = sizeof(struct vnic_devcmd_fw_info);
/* only get fw_info once and cache it */
if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
&a0, &a1, wait);
else
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
&a0, &a1, wait);
}
*fw_info = vdev->fw_info;
return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
u64 a0, a1;
int wait = 1000;
int err;
a0 = offset;
a1 = size;
err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
switch (size) {
case 1: *(u8 *)value = (u8)a0; break;
case 2: *(u16 *)value = (u16)a0; break;
case 4: *(u32 *)value = (u32)a0; break;
case 8: *(u64 *)value = a0; break;
default: BUG(); break;
}
return err;
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->stats) {
vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
&vdev->stats_pa, GFP_ATOMIC);
if (!vdev->stats)
return -ENOMEM;
}
*stats = vdev->stats;
a0 = vdev->stats_pa;
a1 = sizeof(struct vnic_stats);
return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
else
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
int err;
if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
return vnic_dev_cmd(vdev, CMD_HANG_RESET,
&a0, &a1, wait);
} else {
err = vnic_dev_soft_reset(vdev, arg);
if (err)
return err;
return vnic_dev_init(vdev, 0);
}
}
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
&a0, &a1, wait);
if (err)
return err;
} else {
return vnic_dev_soft_reset_done(vdev, done);
}
*done = (a0 == 0);
return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0, a1;
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = ((u8 *)&a0)[i];
return 0;
}
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
u64 a0, a1 = 0;
int wait = 1000;
int err;
a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
(multicast ? CMD_PFILTER_MULTICAST : 0) |
(broadcast ? CMD_PFILTER_BROADCAST : 0) |
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
vdev_neterr(vdev, "Can't set packet filter\n");
return err;
}
int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
if (err)
vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);
return err;
}
int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
if (err)
vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);
return err;
}
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
u8 ig_vlan_rewrite_mode)
{
u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
int wait = 1000;
if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
&a0, &a1, wait);
else
return 0;
}
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
u64 a0, a1;
int wait = 1000;
int r;
memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
vdev->notify = notify_addr;
vdev->notify_pa = notify_pa;
a0 = (u64)notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
return r;
}
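/* Layout of the CMD_NOTIFY arguments built above: a0 carries the DMA address
 * of the notify block, while a1 packs the interrupt index into bits 32-47
 * (mask 0x0000ffff00000000) and the size of struct vnic_devcmd_notify into
 * the low bits.  On success the size the firmware will actually use comes
 * back in a1 and is cached in vdev->notify_sz.
 */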
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
void *notify_addr;
dma_addr_t notify_pa;
if (vdev->notify || vdev->notify_pa) {
vdev_neterr(vdev, "notify block %p still allocated\n",
vdev->notify);
return -EINVAL;
}
notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
&notify_pa, GFP_ATOMIC);
if (!notify_addr)
return -ENOMEM;
return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
int err;
a0 = 0; /* paddr = 0 to unset notify buffer */
a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
a1 += sizeof(struct vnic_devcmd_notify);
err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
vdev->notify = NULL;
vdev->notify_pa = 0;
vdev->notify_sz = 0;
return err;
}
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
if (vdev->notify) {
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify, vdev->notify_pa);
}
return vnic_dev_notify_unsetcmd(vdev);
}
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
unsigned int nwords = vdev->notify_sz / 4;
unsigned int i;
u32 csum;
if (!vdev->notify || !vdev->notify_sz)
return 0;
do {
csum = 0;
memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
words = (u32 *)&vdev->notify_copy;
for (i = 1; i < nwords; i++)
csum += words[i];
} while (csum != words[0]);
return 1;
}
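/* The notify block is updated asynchronously by the device, so the loop above
 * keeps re-copying it until the checksum in words[0] matches the sum of the
 * remaining words, guaranteeing a consistent snapshot before link_state,
 * port_speed, msglvl and mtu are read out of notify_copy below.
 */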
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
int r = 0;
if (vnic_dev_capable(vdev, CMD_INIT))
r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
else {
vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
if (a0 & CMD_INITF_DEFAULT_MAC) {
/* Emulate these for old CMD_INIT_v1 which
* didn't pass a0 so no CMD_INITF_*.
*/
vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
}
}
return r;
}
int vnic_dev_deinit(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
/* Default: hardware intr coal timer is in units of 1.5 usecs */
vdev->intr_coal_timer_info.mul = 2;
vdev->intr_coal_timer_info.div = 3;
vdev->intr_coal_timer_info.max_usec =
vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
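/* Worked example with the defaults above (mul = 2, div = 3, i.e. one hardware
 * tick = 1.5 usec):
 *	vnic_dev_intr_coal_timer_usec_to_hw(vdev, 120)    = 120 * 2 / 3   = 80
 *	vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff) = 65535 * 3 / 2 = 98302
 * so max_usec works out to 98302 usec for the default conversion factors.
 */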
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
int wait = 1000;
int err;
memset(vdev->args, 0, sizeof(vdev->args));
if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
else
err = ERR_ECMDUNKNOWN;
/* Use defaults when firmware doesn't support the devcmd at all or
* supports it for only specific hardware
*/
if ((err == ERR_ECMDUNKNOWN) ||
(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
vnic_dev_intr_coal_timer_info_default(vdev);
return 0;
}
if (!err) {
vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
}
return err;
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_state;
}
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.port_speed;
}
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.msglvl;
}
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.mtu;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
vdev->intr_mode = intr_mode;
}
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
struct vnic_dev *vdev)
{
return vdev->intr_mode;
}
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
return (usec * vdev->intr_coal_timer_info.mul) /
vdev->intr_coal_timer_info.div;
}
u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
return (hw_cycles * vdev->intr_coal_timer_info.div) /
vdev->intr_coal_timer_info.mul;
}
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
return vdev->intr_coal_timer_info.max_usec;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify, vdev->notify_pa);
if (vdev->stats)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
vnic_dev_deinit_devcmd2(vdev);
kfree(vdev);
}
}
EXPORT_SYMBOL(vnic_dev_unregister);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars)
{
if (!vdev) {
vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
vdev->priv = priv;
vdev->pdev = pdev;
if (vnic_dev_discover_res(vdev, bar, num_bars))
goto err_out;
return vdev;
err_out:
vnic_dev_unregister(vdev);
return NULL;
}
EXPORT_SYMBOL(vnic_dev_register);
struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
int vnic_devcmd_init(struct vnic_dev *vdev)
{
void __iomem *res;
int err;
res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
if (res) {
err = vnic_dev_init_devcmd2(vdev);
if (err)
vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
err);
else
return 0;
} else {
vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
}
err = vnic_dev_init_devcmd1(vdev);
if (err)
vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);
return err;
}
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
u64 a0, a1 = len;
int wait = 1000;
dma_addr_t prov_pa;
void *prov_buf;
int ret;
prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
if (!prov_buf)
return -ENOMEM;
memcpy(prov_buf, buf, len);
a0 = prov_pa;
ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);
return ret;
}
int vnic_dev_enable2(struct vnic_dev *vdev, int active)
{
u64 a0, a1 = 0;
int wait = 1000;
a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
}
static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int *status)
{
u64 a0 = cmd, a1 = 0;
int wait = 1000;
int ret;
ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
if (!ret)
*status = (int)a0;
return ret;
}
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a0, a1;
int wait = 1000;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a0)[i] = mac_addr[i];
return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
}
/* vnic_dev_classifier: Add/Delete classifier entries
* @vdev: vdev of the device
* @cmd: CLSF_ADD for Add filter
* CLSF_DEL for Delete filter
* @entry: In case of ADD filter, the caller passes the RQ number in this
* variable.
*
* This function stores the filter_id returned by the firmware in the
* same variable before return;
*
* In case of DEL filter, the caller passes the filter_id obtained from a
* prior ADD; the value left in @entry on return is irrelevant.
* @data: filter data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter *data)
{
u64 a0, a1;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
struct filter_tlv *tlv, *tlv_va;
struct filter_action *action;
u64 tlv_size;
if (cmd == CLSF_ADD) {
tlv_size = sizeof(struct filter) +
sizeof(struct filter_action) +
2 * sizeof(struct filter_tlv);
tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
&tlv_pa, GFP_ATOMIC);
if (!tlv_va)
return -ENOMEM;
tlv = tlv_va;
a0 = tlv_pa;
a1 = tlv_size;
memset(tlv, 0, tlv_size);
tlv->type = CLSF_TLV_FILTER;
tlv->length = sizeof(struct filter);
*(struct filter *)&tlv->val = *data;
tlv = (struct filter_tlv *)((char *)tlv +
sizeof(struct filter_tlv) +
sizeof(struct filter));
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
action = (struct filter_action *)&tlv->val;
action->type = FILTER_ACTION_RQ_STEERING;
action->u.rq_idx = *entry;
ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
*entry = (u16)a0;
dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
a0 = *entry;
ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
}
return ret;
}
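/* Hypothetical usage sketch (variable names are illustrative only):
 *
 *	u16 id = rq_index;	// RQ to steer matching packets to
 *	err = vnic_dev_classifier(vdev, CLSF_ADD, &id, &flt);
 *	// on success, id now holds the filter id assigned by firmware
 *	...
 *	err = vnic_dev_classifier(vdev, CLSF_DEL, &id, NULL);
 */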
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
u64 a0 = overlay;
u64 a1 = config;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number)
{
u64 a1 = vxlan_udp_port_number;
u64 a0 = overlay;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
u64 *supported_versions, u64 *a1)
{
u64 a0 = feature;
int wait = 1000;
int ret;
ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
if (!ret)
*supported_versions = a0;
return ret;
}
int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type)
{
u64 a0 = CMD_NIC_CFG, a1 = 0;
int wait = 1000;
int err;
err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
/* rss_hash_type is valid only when a0 is 1. Adapter which does not
* support CMD_CAPABILITY for rss_hash_type has a0 = 0
*/
if (err || (a0 != 1))
return -EOPNOTSUPP;
a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
*rss_hash_type = (u8)a1;
return 0;
}
| linux-master | drivers/net/ethernet/cisco/enic/vnic_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
#include "enic.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!rq->bufs[i])
return -ENOMEM;
}
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
rq->to_use = rq->to_clean = rq->bufs[0];
return 0;
}
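/* The loop above stitches the per-block vnic_rq_buf arrays into one circular
 * singly linked list: entries within a block point at the next array element,
 * the last entry of a block points at the first entry of the following block,
 * and the entry whose index equals count - 1 points back at rq->bufs[0].
 * to_use/to_clean then simply walk this ring.
 */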
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
if (rq->bufs[i]) {
kfree(rq->bufs[i]);
rq->bufs[i] = NULL;
}
}
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
rq->index = index;
rq->vdev = vdev;
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
vdev_err(vdev, "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_rq_alloc_bufs(rq);
if (err) {
vnic_rq_free(rq);
return err;
}
return 0;
}
static void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = rq->ring.desc_count;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
iowrite32(fetch_index, &rq->ctrl->fetch_index);
iowrite32(posted_index, &rq->ctrl->posted_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
vnic_rq_init_start(rq, cq_index, 0, 0, error_interrupt_enable,
error_interrupt_offset);
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
}
void vnic_rq_enable(struct vnic_rq *rq)
{
iowrite32(1, &rq->ctrl->enable);
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
int i;
/* Due to a race condition with clearing RQ "mini-cache" in hw, we need
* to disable the RQ twice to guarantee that stale descriptors are not
* used when this RQ is re-enabled.
*/
for (i = 0; i < 2; i++) {
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 20000; wait > 0; wait--)
if (!ioread32(&rq->ctrl->running))
break;
if (!wait) {
vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
rq->index);
return -ETIMEDOUT;
}
}
return 0;
}
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
struct vnic_rq_buf *buf;
u32 fetch_index;
unsigned int count = rq->ring.desc_count;
int i;
buf = rq->to_clean;
for (i = 0; i < rq->ring.desc_count; i++) {
(*buf_clean)(rq, buf);
buf = buf->next;
}
rq->ring.desc_avail = rq->ring.desc_count - 1;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
/* Hardware surprise removal: reset fetch_index */
fetch_index = 0;
}
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
/* Anytime we write fetch_index, we need to re-write 0 to rq->enable
* to re-sync internal VIC state.
*/
iowrite32(0, &rq->ctrl->enable);
vnic_dev_clear_desc_ring(&rq->ring);
}
| linux-master | drivers/net/ethernet/cisco/enic/vnic_rq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "enic.h"
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
if (!wq->bufs[i])
return -ENOMEM;
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) {
buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
buf->next->prev = buf;
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) {
buf->next = wq->bufs[i + 1];
buf->next->prev = buf;
} else {
buf->next = buf + 1;
buf->next->prev = buf;
buf++;
}
}
}
wq->to_use = wq->to_clean = wq->bufs[0];
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
if (wq->bufs[i]) {
kfree(wq->bufs[i]);
wq->bufs[i] = NULL;
}
}
wq->ctrl = NULL;
}
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_wq_alloc_bufs(wq);
if (err) {
vnic_wq_free(wq);
return err;
}
return 0;
}
int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = 0;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
if (!wq->ctrl)
return -EINVAL;
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
return err;
}
void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(count, &wq->ctrl->ring_size);
iowrite32(fetch_index, &wq->ctrl->fetch_index);
iowrite32(posted_index, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
wq->to_use = wq->to_clean =
&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)];
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
enic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
}
void vnic_wq_enable(struct vnic_wq *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
struct vnic_dev *vdev = wq->vdev;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 1000; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(10);
}
vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
(*buf_clean)(wq, buf);
buf = wq->to_clean = buf->next;
wq->ring.desc_avail++;
}
wq->to_use = wq->to_clean = wq->bufs[0];
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
| linux-master | drivers/net/ethernet/cisco/enic/vnic_wq.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
*
* Author: Lorenzo Bianconi <[email protected]>
* Sujuan Chen <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/bitfield.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
static u32
mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
{
u32 val;
if (regmap_read(wo->mmio.regs, reg, &val))
val = ~0;
return val;
}
static void
mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
regmap_write(wo->mmio.regs, reg, val);
}
static u32
mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
{
u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
}
static void
mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
}
static void
mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
}
static void
mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
{
unsigned long flags;
spin_lock_irqsave(&wo->mmio.lock, flags);
wo->mmio.irq_mask &= ~mask;
wo->mmio.irq_mask |= val;
if (set)
mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
spin_unlock_irqrestore(&wo->mmio.lock, flags);
}
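/* mask selects the bits to clear in the cached irq_mask and val the bits to
 * set; when set is true the updated mask is also written out to the WO ISR
 * mask register.  mtk_wed_wo_irq_enable() defers the hardware write to the
 * tasklet, while mtk_wed_wo_irq_disable() applies it immediately.
 */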
static void
mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
tasklet_schedule(&wo->mmio.irq_tasklet);
}
static void
mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
{
mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
}
static void
mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
{
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
}
static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
u32 val)
{
wmb();
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}
static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
bool flush)
{
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
int index = (q->tail + 1) % q->n_desc;
struct mtk_wed_wo_queue_entry *entry;
struct mtk_wed_wo_queue_desc *desc;
void *buf;
if (!q->queued)
return NULL;
if (flush)
q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
return NULL;
q->tail = index;
q->queued--;
desc = &q->desc[index];
entry = &q->entry[index];
buf = entry->buf;
if (len)
*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
le32_to_cpu(READ_ONCE(desc->ctrl)));
if (buf)
dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
DMA_FROM_DEVICE);
entry->buf = NULL;
return buf;
}
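/* Ring convention used here: head is the producer index advanced by
 * mtk_wed_wo_queue_refill(), tail is the consumer index advanced on dequeue,
 * and MTK_WED_WO_CTL_DMA_DONE in the descriptor control word marks a
 * descriptor whose buffer is ready to be consumed.  Passing flush = true
 * forces the done bit so every queued buffer can be drained on teardown
 * (see mtk_wed_wo_queue_rx_clean()).
 */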
static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
bool rx)
{
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;
while (q->queued < q->n_desc) {
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
void *buf;
buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
if (!buf)
break;
addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
skb_free_frag(buf);
break;
}
q->head = (q->head + 1) % q->n_desc;
entry = &q->entry[q->head];
entry->addr = addr;
entry->len = q->buf_size;
q->entry[q->head].buf = buf;
if (rx) {
struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
entry->len);
WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
}
q->queued++;
n_buf++;
}
return n_buf;
}
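/* Editor's note (illustrative sketch, not part of the driver): each slot
 * refilled above pairs a page_frag_alloc() buffer with a streaming DMA
 * mapping; the mapping is validated with dma_mapping_error() and undone
 * again in mtk_wed_wo_dequeue() before the buffer is handed on. The helper
 * below restates just that allocate/map/unwind pattern; its name and
 * parameters are placeholders for this sketch.
 */
static inline void *example_rx_buf_get(struct device *dev,
				       struct page_frag_cache *cache,
				       unsigned int len, dma_addr_t *addr)
{
	void *buf = page_frag_alloc(cache, len, GFP_ATOMIC);

	if (!buf)
		return NULL;

	*addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *addr)) {
		skb_free_frag(buf);
		return NULL;
	}

	return buf;
}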
static void
mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
{
mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
}
static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
for (;;) {
struct mtk_wed_mcu_hdr *hdr;
struct sk_buff *skb;
void *data;
u32 len;
data = mtk_wed_wo_dequeue(wo, q, &len, false);
if (!data)
break;
skb = build_skb(data, q->buf_size);
if (!skb) {
skb_free_frag(data);
continue;
}
__skb_put(skb, len);
if (mtk_wed_mcu_check_msg(wo, skb)) {
dev_kfree_skb(skb);
continue;
}
hdr = (struct mtk_wed_mcu_hdr *)skb->data;
if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
mtk_wed_mcu_rx_event(wo, skb);
else
mtk_wed_mcu_rx_unsolicited_event(wo, skb);
}
if (mtk_wed_wo_queue_refill(wo, q, true)) {
u32 index = (q->head - 1) % q->n_desc;
mtk_wed_wo_queue_kick(wo, q, index);
}
}
static irqreturn_t
mtk_wed_wo_irq_handler(int irq, void *data)
{
struct mtk_wed_wo *wo = data;
mtk_wed_wo_set_isr(wo, 0);
tasklet_schedule(&wo->mmio.irq_tasklet);
return IRQ_HANDLED;
}
static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
{
struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
u32 intr, mask;
/* disable interrupts */
mtk_wed_wo_set_isr(wo, 0);
intr = mtk_wed_wo_get_isr(wo);
intr &= wo->mmio.irq_mask;
mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
mtk_wed_wo_irq_disable(wo, mask);
if (intr & MTK_WED_WO_RXCH_INT_MASK) {
mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
mtk_wed_wo_rx_complete(wo);
}
}
/* mtk wed wo hw queues */
static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int n_desc, int buf_size, int index,
struct mtk_wed_wo_queue_regs *regs)
{
q->regs = *regs;
q->n_desc = n_desc;
q->buf_size = buf_size;
q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
&q->desc_dma, GFP_KERNEL);
if (!q->desc)
return -ENOMEM;
q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
return 0;
}
static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
q->desc_dma);
}
static void
mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
int i;
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
DMA_TO_DEVICE);
skb_free_frag(entry->buf);
entry->buf = NULL;
}
if (!q->cache.va)
return;
page = virt_to_page(q->cache.va);
__page_frag_cache_drain(page, q->cache.pagecnt_bias);
memset(&q->cache, 0, sizeof(q->cache));
}
static void
mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
if (!buf)
break;
skb_free_frag(buf);
}
if (!q->cache.va)
return;
page = virt_to_page(q->cache.va);
__page_frag_cache_drain(page, q->cache.pagecnt_bias);
memset(&q->cache, 0, sizeof(q->cache));
}
static void
mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
}
int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
struct sk_buff *skb)
{
struct mtk_wed_wo_queue_entry *entry;
struct mtk_wed_wo_queue_desc *desc;
int ret = 0, index;
u32 ctrl;
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
index = (q->head + 1) % q->n_desc;
if (q->tail == index) {
ret = -ENOMEM;
goto out;
}
entry = &q->entry[index];
if (skb->len > entry->len) {
ret = -ENOMEM;
goto out;
}
desc = &q->desc[index];
q->head = index;
dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
DMA_TO_DEVICE);
memcpy(entry->buf, skb->data, skb->len);
dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
DMA_TO_DEVICE);
ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
mtk_wed_wo_queue_kick(wo, q, q->head);
mtk_wed_wo_kickout(wo);
out:
dev_kfree_skb(skb);
return ret;
}
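/* Editor's note (illustrative sketch, not part of the driver): TX above
 * copies the skb payload into a pre-mapped bounce buffer rather than
 * mapping the skb itself, so the CPU write has to be bracketed by
 * dma_sync_single_for_cpu()/dma_sync_single_for_device() as done in the
 * function. The helper below restates that bracketing with placeholder
 * names.
 */
static inline void example_bounce_copy(struct device *dev, dma_addr_t addr,
				       void *bounce, const void *src,
				       size_t len)
{
	/* hand the mapped region back to the CPU, fill it, then return it
	 * to the device before ringing the doorbell
	 */
	dma_sync_single_for_cpu(dev, addr, len, DMA_TO_DEVICE);
	memcpy(bounce, src, len);
	dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);
}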
static int
mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
{
return 0;
}
static int
mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
{
struct mtk_wed_wo_queue_regs regs;
struct device_node *np;
int ret;
np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
if (!np)
return -ENODEV;
wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
if (IS_ERR(wo->mmio.regs)) {
ret = PTR_ERR(wo->mmio.regs);
goto error_put;
}
wo->mmio.irq = irq_of_parse_and_map(np, 0);
wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
spin_lock_init(&wo->mmio.lock);
tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
KBUILD_MODNAME, wo);
if (ret)
goto error;
regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
&regs);
if (ret)
goto error;
mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
mtk_wed_wo_queue_reset(wo, &wo->q_tx);
regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
&regs);
if (ret)
goto error;
mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
mtk_wed_wo_queue_reset(wo, &wo->q_rx);
/* rx queue irqmask */
mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
return 0;
error:
devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
error_put:
of_node_put(np);
return ret;
}
static void
mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
{
/* disable interrupts */
mtk_wed_wo_set_isr(wo, 0);
tasklet_disable(&wo->mmio.irq_tasklet);
disable_irq(wo->mmio.irq);
devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
mtk_wed_wo_queue_free(wo, &wo->q_tx);
mtk_wed_wo_queue_free(wo, &wo->q_rx);
}
int mtk_wed_wo_init(struct mtk_wed_hw *hw)
{
struct mtk_wed_wo *wo;
int ret;
wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
if (!wo)
return -ENOMEM;
hw->wed_wo = wo;
wo->hw = hw;
ret = mtk_wed_wo_hardware_init(wo);
if (ret)
return ret;
ret = mtk_wed_mcu_init(wo);
if (ret)
return ret;
return mtk_wed_wo_exception_init(wo);
}
void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
{
struct mtk_wed_wo *wo = hw->wed_wo;
mtk_wed_wo_hw_deinit(wo);
}
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_wed_wo.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include "mtk_eth_soc.h"
struct mtk_flow_addr_info
{
void *src, *dest;
u16 *src_port, *dest_port;
bool ipv6;
};
static const char *mtk_foe_entry_state_str(int state)
{
static const char * const state_str[] = {
[MTK_FOE_STATE_INVALID] = "INV",
[MTK_FOE_STATE_UNBIND] = "UNB",
[MTK_FOE_STATE_BIND] = "BND",
[MTK_FOE_STATE_FIN] = "FIN",
};
if (state >= ARRAY_SIZE(state_str) || !state_str[state])
return "UNK";
return state_str[state];
}
static const char *mtk_foe_pkt_type_str(int type)
{
static const char * const type_str[] = {
[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
[MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
};
if (type >= ARRAY_SIZE(type_str) || !type_str[type])
return "UNKNOWN";
return type_str[type];
}
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
__be32 n_addr[4];
int i;
if (!ipv6) {
seq_printf(m, "%pI4h", addr);
return;
}
for (i = 0; i < ARRAY_SIZE(n_addr); i++)
n_addr[i] = htonl(addr[i]);
seq_printf(m, "%pI6", n_addr);
}
static void
mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
{
mtk_print_addr(m, ai->src, ai->ipv6);
if (ai->src_port)
seq_printf(m, ":%d", *ai->src_port);
seq_printf(m, "->");
mtk_print_addr(m, ai->dest, ai->ipv6);
if (ai->dest_port)
seq_printf(m, ":%d", *ai->dest_port);
}
static int
mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
{
struct mtk_ppe *ppe = m->private;
int i;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
struct mtk_foe_accounting *acct;
unsigned char h_source[ETH_ALEN];
unsigned char h_dest[ETH_ALEN];
int type, state;
u32 ib2;
state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
if (!state)
continue;
if (bind && state != MTK_FOE_STATE_BIND)
continue;
acct = mtk_foe_entry_get_mib(ppe, i, NULL);
type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
seq_printf(m, "%05x %s %7s", i,
mtk_foe_entry_state_str(state),
mtk_foe_pkt_type_str(type));
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
ai.src_port = &entry->ipv4.orig.src_port;
ai.dest_port = &entry->ipv4.orig.dest_port;
fallthrough;
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
ai.src = &entry->ipv4.orig.src_ip;
ai.dest = &entry->ipv4.orig.dest_ip;
break;
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
ai.src_port = &entry->ipv6.src_port;
ai.dest_port = &entry->ipv6.dest_port;
fallthrough;
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
case MTK_PPE_PKT_TYPE_IPV6_6RD:
ai.src = &entry->ipv6.src_ip;
ai.dest = &entry->ipv6.dest_ip;
ai.ipv6 = true;
break;
}
seq_printf(m, " orig=");
mtk_print_addr_info(m, &ai);
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
ai.src_port = &entry->ipv4.new.src_port;
ai.dest_port = &entry->ipv4.new.dest_port;
fallthrough;
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
ai.src = &entry->ipv4.new.src_ip;
ai.dest = &entry->ipv4.new.dest_ip;
seq_printf(m, " new=");
mtk_print_addr_info(m, &ai);
break;
}
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
l2 = &entry->ipv6.l2;
ib2 = entry->ipv6.ib2;
} else {
l2 = &entry->ipv4.l2;
ib2 = entry->ipv4.ib2;
}
*((__be32 *)h_source) = htonl(l2->src_mac_hi);
*((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
*((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
*((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
seq_printf(m, " eth=%pM->%pM etype=%04x"
" vlan=%d,%d ib1=%08x ib2=%08x"
" packets=%llu bytes=%llu\n",
h_source, h_dest, ntohs(l2->etype),
l2->vlan1, l2->vlan2, entry->ib1, ib2,
acct ? acct->packets : 0, acct ? acct->bytes : 0);
}
return 0;
}
static int
mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, false);
}
DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
static int
mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, true);
}
DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
struct dentry *root;
snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
root = debugfs_create_dir(ppe->dirname, NULL);
debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
return 0;
}
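/* Editor's note (illustrative sketch, not part of the driver):
 * DEFINE_SHOW_ATTRIBUTE(name) expects a function named name##_show() taking
 * (struct seq_file *, void *) and generates name##_fops, which is what the
 * "entries" and "bind" files above hand to debugfs_create_file(). A minimal
 * standalone use of the same pattern looks like this; "example" is a
 * hypothetical name.
 */
static int example_show(struct seq_file *m, void *private)
{
	seq_puts(m, "hello from debugfs\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);
/* usage: debugfs_create_file("example", 0444, root, NULL, &example_fops); */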
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Felix Fietkau <[email protected]>
*/
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
struct mtk_flow_data {
struct ethhdr eth;
union {
struct {
__be32 src_addr;
__be32 dst_addr;
} v4;
struct {
struct in6_addr src_addr;
struct in6_addr dst_addr;
} v6;
};
__be16 src_port;
__be16 dst_port;
u16 vlan_in;
struct {
u16 id;
__be16 proto;
u8 num;
} vlan;
struct {
u16 sid;
u8 num;
} pppoe;
};
static const struct rhashtable_params mtk_flow_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, node),
.key_offset = offsetof(struct mtk_flow_entry, cookie),
.key_len = sizeof(unsigned long),
.automatic_shrinking = true,
};
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
struct mtk_flow_data *data, bool egress)
{
return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
data->v4.src_addr, data->src_port,
data->v4.dst_addr, data->dst_port);
}
static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
struct mtk_flow_data *data)
{
return mtk_foe_entry_set_ipv6_tuple(eth, foe,
data->v6.src_addr.s6_addr32, data->src_port,
data->v6.dst_addr.s6_addr32, data->dst_port);
}
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
void *dest = eth + act->mangle.offset;
const void *src = &act->mangle.val;
if (act->mangle.offset > 8)
return;
if (act->mangle.mask == 0xffff) {
src += 2;
dest += 2;
}
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
struct net_device_path_stack stack;
struct net_device_path *path;
int err;
if (!dev)
return -ENODEV;
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
return -1;
err = dev_fill_forward_path(dev, addr, &stack);
if (err)
return err;
path = &stack.path[stack.num_paths - 1];
if (path->type != DEV_PATH_MTK_WDMA)
return -1;
info->wdma_idx = path->mtk_wdma.wdma_idx;
info->queue = path->mtk_wdma.queue;
info->bss = path->mtk_wdma.bss;
info->wcid = path->mtk_wdma.wcid;
return 0;
}
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
struct mtk_flow_data *data)
{
u32 val = ntohl(act->mangle.val);
switch (act->mangle.offset) {
case 0:
if (act->mangle.mask == ~htonl(0xffff))
data->dst_port = cpu_to_be16(val);
else
data->src_port = cpu_to_be16(val >> 16);
break;
case 2:
data->dst_port = cpu_to_be16(val);
break;
default:
return -EINVAL;
}
return 0;
}
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
struct mtk_flow_data *data)
{
__be32 *dest;
switch (act->mangle.offset) {
case offsetof(struct iphdr, saddr):
dest = &data->v4.src_addr;
break;
case offsetof(struct iphdr, daddr):
dest = &data->v4.dst_addr;
break;
default:
return -EINVAL;
}
memcpy(dest, &act->mangle.val, sizeof(u32));
return 0;
}
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_port *dp;
dp = dsa_port_from_netdev(*dev);
if (IS_ERR(dp))
return -ENODEV;
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
*dev = dsa_port_to_master(dp);
return dp->index;
#else
return -ENODEV;
#endif
}
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
struct net_device *dev, const u8 *dest_mac,
int *wed_index)
{
struct mtk_wdma_info info = {};
int pse_port, dsa_port, queue;
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
info.bss, info.wcid);
if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
pse_port = 8;
break;
case 1:
pse_port = 9;
break;
default:
return -EINVAL;
}
} else {
pse_port = 3;
}
*wed_index = info.wdma_idx;
goto out;
}
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
pse_port = PSE_GDM2_PORT;
else if (dev == eth->netdev[2])
pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;
if (dsa_port >= 0) {
mtk_foe_entry_set_dsa(eth, foe, dsa_port);
queue = 3 + dsa_port;
} else {
queue = pse_port - 1;
}
mtk_foe_entry_set_queue(eth, foe, queue);
out:
mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action_entry *act;
struct mtk_flow_data data = {};
struct mtk_foe_entry foe;
struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
int wed_index = -1;
u16 addr_type = 0;
u8 l4proto = 0;
int err = 0;
int i;
if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
return -EEXIST;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
} else {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
} else {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
l4proto = match.key->ip_proto;
} else {
return -EOPNOTSUPP;
}
switch (addr_type) {
case 0:
offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
} else {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
return -EOPNOTSUPP;
data.vlan_in = match.key->vlan_id;
}
break;
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
break;
default:
return -EOPNOTSUPP;
}
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
mtk_flow_offload_mangle_eth(act, &data.eth);
break;
case FLOW_ACTION_REDIRECT:
odev = act->dev;
break;
case FLOW_ACTION_CSUM:
break;
case FLOW_ACTION_VLAN_PUSH:
if (data.vlan.num == 1 ||
act->vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
data.vlan.id = act->vlan.vid;
data.vlan.proto = act->vlan.proto;
data.vlan.num++;
break;
case FLOW_ACTION_VLAN_POP:
break;
case FLOW_ACTION_PPPOE_PUSH:
if (data.pppoe.num == 1)
return -EOPNOTSUPP;
data.pppoe.sid = act->pppoe.sid;
data.pppoe.num++;
break;
default:
return -EOPNOTSUPP;
}
}
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
data.eth.h_source, data.eth.h_dest);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs addrs;
flow_rule_match_ipv4_addrs(rule, &addrs);
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs addrs;
flow_rule_match_ipv6_addrs(rule, &addrs);
data.v6.src_addr = addrs.key->src;
data.v6.dst_addr = addrs.key->dst;
mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
return -EOPNOTSUPP;
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
err = mtk_flow_mangle_ports(act, &data);
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
err = mtk_flow_mangle_ipv4(act, &data);
break;
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
/* handled earlier */
break;
default:
return -EOPNOTSUPP;
}
if (err)
return err;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
foe.bridge.vlan = data.vlan_in;
if (data.vlan.num == 1) {
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
&wed_index);
if (err)
return err;
if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
return err;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->cookie = f->cookie;
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
entry->ppe_index = ppe_index;
err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
goto free;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (err < 0)
goto clear;
return 0;
clear:
mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
if (wed_index >= 0)
mtk_wed_flow_remove(wed_index);
return err;
}
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
mtk_flow_ht_params);
if (!entry)
return -ENOENT;
mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (entry->wed_index >= 0)
mtk_wed_flow_remove(entry->wed_index);
kfree(entry);
return 0;
}
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
struct mtk_foe_accounting diff;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
mtk_flow_ht_params);
if (!entry)
return -ENOENT;
idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
if (entry->hash != 0xFFFF &&
mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
&diff)) {
f->stats.pkts += diff.packets;
f->stats.bytes += diff.bytes;
}
return 0;
}
static DEFINE_MUTEX(mtk_flow_offload_mutex);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
int ppe_index)
{
int err;
mutex_lock(&mtk_flow_offload_mutex);
switch (cls->command) {
case FLOW_CLS_REPLACE:
err = mtk_flow_offload_replace(eth, cls, ppe_index);
break;
case FLOW_CLS_DESTROY:
err = mtk_flow_offload_destroy(eth, cls);
break;
case FLOW_CLS_STATS:
err = mtk_flow_offload_stats(eth, cls);
break;
default:
err = -EOPNOTSUPP;
break;
}
mutex_unlock(&mtk_flow_offload_mutex);
return err;
}
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
struct flow_cls_offload *cls = type_data;
struct net_device *dev = cb_priv;
struct mtk_mac *mac;
struct mtk_eth *eth;
mac = netdev_priv(dev);
eth = mac->hw;
if (!tc_can_offload(dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
return mtk_flow_offload_cmd(eth, cls, 0);
}
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
static LIST_HEAD(block_cb_list);
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
cb = mtk_eth_setup_tc_block_cb;
f->driver_block_list = &block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (block_cb) {
flow_block_cb_incref(block_cb);
return 0;
}
block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
flow_block_cb_incref(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (!block_cb)
return -ENOENT;
if (!flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
return 0;
default:
return -EOPNOTSUPP;
}
}
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
case TC_SETUP_BLOCK:
case TC_SETUP_FT:
return mtk_eth_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
}
int mtk_eth_offload_init(struct mtk_eth *eth)
{
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
#include <linux/kernel.h>
#include <linux/soc/mediatek/mtk_wed.h>
const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_wed_ops.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2009-2016 John Crispin <[email protected]>
* Copyright (C) 2009-2016 Felix Fietkau <[email protected]>
* Copyright (C) 2013-2016 Michael Lee <[email protected]>
*/
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/jhash.h>
#include <linux/bitfield.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/page_pool/helpers.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, xdp_stats.x) / \
sizeof(u64) }
static const struct mtk_reg_map mtk_reg_map = {
.tx_irq_mask = 0x1a1c,
.tx_irq_status = 0x1a18,
.pdma = {
.rx_ptr = 0x0900,
.rx_cnt_cfg = 0x0904,
.pcrx_ptr = 0x0908,
.glo_cfg = 0x0a04,
.rst_idx = 0x0a08,
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
.adma_rx_dbg0 = 0x0a38,
.int_grp = 0x0a50,
},
.qdma = {
.qtx_cfg = 0x1800,
.qtx_sch = 0x1804,
.rx_ptr = 0x1900,
.rx_cnt_cfg = 0x1904,
.qcrx_ptr = 0x1908,
.glo_cfg = 0x1a04,
.rst_idx = 0x1a08,
.delay_irq = 0x1a0c,
.fc_th = 0x1a10,
.tx_sch_rate = 0x1a14,
.int_grp = 0x1a20,
.hred = 0x1a44,
.ctx_ptr = 0x1b00,
.dtx_ptr = 0x1b04,
.crx_ptr = 0x1b10,
.drx_ptr = 0x1b14,
.fq_head = 0x1b20,
.fq_tail = 0x1b24,
.fq_count = 0x1b28,
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
.gdma_to_ppe = 0x4444,
.ppe_base = 0x0c00,
.wdma_base = {
[0] = 0x2800,
[1] = 0x2c00,
},
.pse_iq_sta = 0x0110,
.pse_oq_sta = 0x0118,
};
static const struct mtk_reg_map mt7628_reg_map = {
.tx_irq_mask = 0x0a28,
.tx_irq_status = 0x0a20,
.pdma = {
.rx_ptr = 0x0900,
.rx_cnt_cfg = 0x0904,
.pcrx_ptr = 0x0908,
.glo_cfg = 0x0a04,
.rst_idx = 0x0a08,
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
.int_grp = 0x0a50,
},
};
static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
.rx_ptr = 0x6100,
.rx_cnt_cfg = 0x6104,
.pcrx_ptr = 0x6108,
.glo_cfg = 0x6204,
.rst_idx = 0x6208,
.delay_irq = 0x620c,
.irq_status = 0x6220,
.irq_mask = 0x6228,
.adma_rx_dbg0 = 0x6238,
.int_grp = 0x6250,
},
.qdma = {
.qtx_cfg = 0x4400,
.qtx_sch = 0x4404,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
.fc_th = 0x4610,
.int_grp = 0x4620,
.hred = 0x4644,
.ctx_ptr = 0x4700,
.dtx_ptr = 0x4704,
.crx_ptr = 0x4710,
.drx_ptr = 0x4714,
.fq_head = 0x4720,
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
.gdma_to_ppe = 0x3333,
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
};
static const struct mtk_reg_map mt7988_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
.rx_ptr = 0x6900,
.rx_cnt_cfg = 0x6904,
.pcrx_ptr = 0x6908,
.glo_cfg = 0x6a04,
.rst_idx = 0x6a08,
.delay_irq = 0x6a0c,
.irq_status = 0x6a20,
.irq_mask = 0x6a28,
.adma_rx_dbg0 = 0x6a38,
.int_grp = 0x6a50,
},
.qdma = {
.qtx_cfg = 0x4400,
.qtx_sch = 0x4404,
.rx_ptr = 0x4500,
.rx_cnt_cfg = 0x4504,
.qcrx_ptr = 0x4508,
.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
.fc_th = 0x4610,
.int_grp = 0x4620,
.hred = 0x4644,
.ctx_ptr = 0x4700,
.dtx_ptr = 0x4704,
.crx_ptr = 0x4710,
.drx_ptr = 0x4714,
.fq_head = 0x4720,
.fq_tail = 0x4724,
.fq_count = 0x4728,
.fq_blen = 0x472c,
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
.gdma_to_ppe = 0x3333,
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
};
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
u32 offset;
} mtk_ethtool_stats[] = {
MTK_ETHTOOL_STAT(tx_bytes),
MTK_ETHTOOL_STAT(tx_packets),
MTK_ETHTOOL_STAT(tx_skip),
MTK_ETHTOOL_STAT(tx_collisions),
MTK_ETHTOOL_STAT(rx_bytes),
MTK_ETHTOOL_STAT(rx_packets),
MTK_ETHTOOL_STAT(rx_overflow),
MTK_ETHTOOL_STAT(rx_fcs_errors),
MTK_ETHTOOL_STAT(rx_short_errors),
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
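/* Editor's note (illustrative sketch, not part of this excerpt): the
 * .offset stored by MTK_ETHTOOL_STAT()/MTK_ETHTOOL_XDP_STAT() is the
 * counter's position inside struct mtk_hw_stats measured in u64 slots, so
 * the ethtool read path (not shown here) can treat the stats structure as a
 * flat array of u64. The helper below restates that lookup; it is not used
 * by the driver.
 */
static inline u64 example_read_stat(const struct mtk_hw_stats *hw_stats,
				    const struct mtk_ethtool_stats *stat)
{
	/* index a flat u64 view of the structure by the stored slot number */
	return ((const u64 *)hw_stats)[stat->offset];
}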
static const char * const mtk_clks_source_name[] = {
"ethif",
"sgmiitop",
"esw",
"gp0",
"gp1",
"gp2",
"gp3",
"xgp1",
"xgp2",
"xgp3",
"crypto",
"fe",
"trgpll",
"sgmii_tx250m",
"sgmii_rx250m",
"sgmii_cdr_ref",
"sgmii_cdr_fb",
"sgmii2_tx250m",
"sgmii2_rx250m",
"sgmii2_cdr_ref",
"sgmii2_cdr_fb",
"sgmii_ck",
"eth2pll",
"wocpu0",
"wocpu1",
"netsys0",
"netsys1",
"ethwarp_wocpu2",
"ethwarp_wocpu1",
"ethwarp_wocpu0",
"top_usxgmii0_sel",
"top_usxgmii1_sel",
"top_sgm0_sel",
"top_sgm1_sel",
"top_xfi_phy0_xtal_sel",
"top_xfi_phy1_xtal_sel",
"top_eth_gmii_sel",
"top_eth_refck_50m_sel",
"top_eth_sys_200m_sel",
"top_eth_sys_sel",
"top_eth_xgmii_sel",
"top_eth_mii_sel",
"top_netsys_sel",
"top_netsys_500m_sel",
"top_netsys_pao_2x_sel",
"top_netsys_sync_250m_sel",
"top_netsys_ppefb_250m_sel",
"top_netsys_warp_sel",
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
}
u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
return __raw_readl(eth->base + reg);
}
u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
{
u32 val;
val = mtk_r32(eth, reg);
val &= ~mask;
val |= set;
mtk_w32(eth, val, reg);
return reg;
}
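/* Editor's note (illustrative sketch, not part of the driver): mtk_m32() is
 * a read-modify-write helper: the bits in "mask" are cleared and the bits
 * in "set" are then ORed in before the value is written back. Updating a
 * hypothetical two-bit field at bits [5:4] to the value 2 would look like
 * this ("reg" is a placeholder register offset).
 */
static inline void example_update_field(struct mtk_eth *eth, unsigned int reg)
{
	mtk_m32(eth, GENMASK(5, 4), FIELD_PREP(GENMASK(5, 4), 2), reg);
}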
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
unsigned long t_start = jiffies;
while (1) {
if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
return 0;
if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
break;
cond_resched();
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
return -ETIMEDOUT;
}
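/* Editor's note (illustrative sketch, not part of the driver): the loop
 * above is an open-coded poll-with-timeout on the PHY_IAC_ACCESS busy bit.
 * The same shape can be expressed with read_poll_timeout() from
 * <linux/iopoll.h> (not included by this file); the 20us poll interval and
 * 100ms budget below are illustrative values, not the driver's.
 */
static inline int example_mdio_wait_idle(struct mtk_eth *eth)
{
	u32 val;

	return read_poll_timeout(mtk_r32, val, !(val & PHY_IAC_ACCESS),
				 20, 100000, false, eth, MTK_PHY_IAC);
}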
static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
u32 write_data)
{
int ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C22 |
PHY_IAC_CMD_WRITE |
PHY_IAC_REG(phy_reg) |
PHY_IAC_ADDR(phy_addr) |
PHY_IAC_DATA(write_data),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
return 0;
}
static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
u32 devad, u32 phy_reg, u32 write_data)
{
int ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C45 |
PHY_IAC_CMD_C45_ADDR |
PHY_IAC_REG(devad) |
PHY_IAC_ADDR(phy_addr) |
PHY_IAC_DATA(phy_reg),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C45 |
PHY_IAC_CMD_WRITE |
PHY_IAC_REG(devad) |
PHY_IAC_ADDR(phy_addr) |
PHY_IAC_DATA(write_data),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
return 0;
}
static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
int ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C22 |
PHY_IAC_CMD_C22_READ |
PHY_IAC_REG(phy_reg) |
PHY_IAC_ADDR(phy_addr),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
u32 devad, u32 phy_reg)
{
int ret;
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C45 |
PHY_IAC_CMD_C45_ADDR |
PHY_IAC_REG(devad) |
PHY_IAC_ADDR(phy_addr) |
PHY_IAC_DATA(phy_reg),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
mtk_w32(eth, PHY_IAC_ACCESS |
PHY_IAC_START_C45 |
PHY_IAC_CMD_C45_READ |
PHY_IAC_REG(devad) |
PHY_IAC_ADDR(phy_addr),
MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
return ret;
return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
}
static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
int devad, int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
}
static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
{
struct mtk_eth *eth = bus->priv;
return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
}
static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
int phy_reg)
{
struct mtk_eth *eth = bus->priv;
return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
}
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface)
{
u32 val;
val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
ETHSYS_TRGMII_MT7621_MASK, val);
return 0;
}
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface)
{
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
return;
}
dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
}
static void mtk_setup_bridge_switch(struct mtk_eth *eth)
{
/* Force Port1 XGMAC Link Up */
mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
MTK_XGMAC_STS(MTK_GMAC1_ID));
/* Adjust GSW bridge IPG to 11 */
mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
(GSW_IPG_11 << GSWTX_IPG_SHIFT) |
(GSW_IPG_11 << GSWRX_IPG_SHIFT),
MTK_GSW_CFG);
}
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
unsigned int sid;
if (interface == PHY_INTERFACE_MODE_SGMII ||
phy_interface_mode_is_8023z(interface)) {
sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
0 : mac->id;
return eth->sgmii_pcs[sid];
}
return NULL;
}
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
int val, ge_mode, err = 0;
u32 i;
/* MT76x8 has no hardware settings for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
mac->interface != state->interface) {
/* Setup soc pin functions */
switch (state->interface) {
case PHY_INTERFACE_MODE_TRGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_MII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
err = mtk_gmac_rgmii_path_setup(eth, mac->id);
if (err)
goto init_err;
}
break;
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
err = mtk_gmac_sgmii_path_setup(eth, mac->id);
if (err)
goto init_err;
break;
case PHY_INTERFACE_MODE_GMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
err = mtk_gmac_gephy_path_setup(eth, mac->id);
if (err)
goto init_err;
}
break;
case PHY_INTERFACE_MODE_INTERNAL:
break;
default:
goto err_phy;
}
/* Setup clock for 1st gmac */
if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
!phy_interface_mode_is_8023z(state->interface) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
if (MTK_HAS_CAPS(mac->hw->soc->caps,
MTK_TRGMII_MT7621_CLK)) {
if (mt7621_gmac0_rgmii_adjust(mac->hw,
state->interface))
goto err_phy;
} else {
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
mtk_w32(mac->hw,
TD_DM_DRVP(8) | TD_DM_DRVN(8),
TRGMII_TD_ODT(i));
/* Assert/release MT7623 RXC reset */
mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
TRGMII_RCK_CTRL);
mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
}
}
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
default:
ge_mode = 0;
break;
}
/* put the gmac into the right mode */
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
mac->interface = state->interface;
}
/* SGMII */
if (state->interface == PHY_INTERFACE_MODE_SGMII ||
phy_interface_mode_is_8023z(state->interface)) {
/* The path from GMAC to SGMII will be enabled once the SGMIISYS
* setup is done.
*/
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK,
~(u32)SYSCFG0_SGMII_MASK);
/* Save the syscfg0 value for mac_finish */
mac->syscfg0 = val;
} else if (phylink_autoneg_inband(mode)) {
dev_err(eth->dev,
"In-band mode not supported in non SGMII mode!\n");
return;
}
/* Setup gmac */
if (mtk_is_netsys_v3_or_greater(eth) &&
mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
mtk_setup_bridge_switch(eth);
}
return;
err_phy:
dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
mac->id, phy_modes(state->interface));
return;
init_err:
dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
mac->id, phy_modes(state->interface), err);
}
static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
/* Enable SGMII */
if (interface == PHY_INTERFACE_MODE_SGMII ||
phy_interface_mode_is_8023z(interface))
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK, mac->syscfg0);
/* Setup gmac */
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
return 0;
}
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
int speed)
{
const struct mtk_soc_data *soc = eth->soc;
u32 ofs, val;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
return;
val = MTK_QTX_SCH_MIN_RATE_EN |
/* minimum: 10 Mbps */
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
if (IS_ENABLED(CONFIG_SOC_MT7621)) {
switch (speed) {
case SPEED_10:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
break;
}
} else {
switch (speed) {
case SPEED_10:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_100:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
break;
case SPEED_1000:
val |= MTK_QTX_SCH_MAX_RATE_EN |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
break;
default:
break;
}
}
ofs = MTK_QTX_OFFSET * idx;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
static void mtk_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
mac->speed = speed;
switch (speed) {
case SPEED_2500:
case SPEED_1000:
mcr |= MAC_MCR_SPEED_1000;
break;
case SPEED_100:
mcr |= MAC_MCR_SPEED_100;
break;
}
/* Configure duplex */
if (duplex == DUPLEX_FULL)
mcr |= MAC_MCR_FORCE_DPX;
/* Configure pause modes - phylink will avoid these for half duplex */
if (tx_pause)
mcr |= MAC_MCR_FORCE_TX_FC;
if (rx_pause)
mcr |= MAC_MCR_FORCE_RX_FC;
mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
static const struct phylink_mac_ops mtk_phylink_ops = {
.mac_select_pcs = mtk_mac_select_pcs,
.mac_config = mtk_mac_config,
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
};
static int mtk_mdio_init(struct mtk_eth *eth)
{
unsigned int max_clk = 2500000, divider;
struct device_node *mii_np;
int ret;
u32 val;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
dev_err(eth->dev, "no %s child node found", "mdio-bus");
return -ENODEV;
}
if (!of_device_is_available(mii_np)) {
ret = -ENODEV;
goto err_put_node;
}
eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
ret = -ENOMEM;
goto err_put_node;
}
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read_c22;
eth->mii_bus->write = mtk_mdio_write_c22;
eth->mii_bus->read_c45 = mtk_mdio_read_c45;
eth->mii_bus->write_c45 = mtk_mdio_write_c45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
dev_err(eth->dev, "MDIO clock frequency out of range");
ret = -EINVAL;
goto err_put_node;
}
max_clk = val;
}
divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
/* Configure MDC Turbo Mode */
if (mtk_is_netsys_v3_or_greater(eth))
mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
/* Configure MDC Divider */
val = FIELD_PREP(PPSC_MDC_CFG, divider);
if (!mtk_is_netsys_v3_or_greater(eth))
val |= PPSC_MDC_TURBO;
mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
if (!eth->mii_bus)
return;
mdiobus_unregister(eth->mii_bus);
}
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const char *macaddr = dev->dev_addr;
if (ret)
return ret;
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
spin_lock_bh(&mac->hw->page_lock);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MT7628_SDM_MAC_ADRH);
mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
(macaddr[4] << 8) | macaddr[5],
MT7628_SDM_MAC_ADRL);
} else {
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MTK_GDMA_MAC_ADRH(mac->id));
mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
(macaddr[4] << 8) | macaddr[5],
MTK_GDMA_MAC_ADRL(mac->id));
}
spin_unlock_bh(&mac->hw->page_lock);
return 0;
}
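/* Editor's note (illustrative sketch, not part of the driver): the shifts
 * above split the address across a "high" and a "low" register; for
 * 00:11:22:33:44:55 the high word is 0x0011 and the low word is
 * 0x22334455. The helper below only restates that packing with placeholder
 * names.
 */
static inline void example_pack_mac(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}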
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
if (mtk_is_netsys_v3_or_greater(eth)) {
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
} else {
hw_stats->tx_skip +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
}
u64_stats_update_end(&hw_stats->syncp);
}
static void mtk_stats_update(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->mac[i] || !eth->mac[i]->hw_stats)
continue;
if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
mtk_stats_update_mac(eth->mac[i]);
spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
}
}
}
static void mtk_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hw_stats = mac->hw_stats;
unsigned int start;
if (netif_running(dev) && netif_device_present(dev)) {
if (spin_trylock_bh(&hw_stats->stats_lock)) {
mtk_stats_update_mac(mac);
spin_unlock_bh(&hw_stats->stats_lock);
}
}
do {
start = u64_stats_fetch_begin(&hw_stats->syncp);
storage->rx_packets = hw_stats->rx_packets;
storage->tx_packets = hw_stats->tx_packets;
storage->rx_bytes = hw_stats->rx_bytes;
storage->tx_bytes = hw_stats->tx_bytes;
storage->collisions = hw_stats->tx_collisions;
storage->rx_length_errors = hw_stats->rx_short_errors +
hw_stats->rx_long_errors;
storage->rx_over_errors = hw_stats->rx_overflow;
storage->rx_crc_errors = hw_stats->rx_fcs_errors;
storage->rx_errors = hw_stats->rx_checksum_errors;
storage->tx_aborted_errors = hw_stats->tx_skip;
} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
storage->tx_errors = dev->stats.tx_errors;
storage->rx_dropped = dev->stats.rx_dropped;
storage->tx_dropped = dev->stats.tx_dropped;
}
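/* Editor's note (illustrative sketch, not part of the driver): the do/while
 * above is the standard u64_stats_sync reader loop: retry the copy if a
 * writer (mtk_stats_update_mac(), under u64_stats_update_begin()/end())
 * raced with it, which keeps the 64-bit counters consistent on 32-bit
 * machines. A minimal reader of a single counter has this shape.
 */
static inline u64 example_stats_snapshot(const struct mtk_hw_stats *hw_stats)
{
	unsigned int start;
	u64 tx_packets;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		tx_packets = hw_stats->tx_packets;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));

	return tx_packets;
}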
static inline int mtk_max_frag_size(int mtu)
{
/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
static inline int mtk_max_buf_size(int frag_size)
{
int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
return buf_size;
}
static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
return false;
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
return true;
}
static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
{
unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
unsigned long data;
data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
get_order(size));
return (void *)data;
}
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
int cnt = MTK_QDMA_RING_SIZE;
dma_addr_t dma_addr;
int i;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
struct mtk_tx_dma_v2 *txd;
txd = eth->scratch_ring + i * soc->txrx.txd_size;
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
txd->txd2 = eth->phy_scratch_ring +
(i + 1) * soc->txrx.txd_size;
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
txd->txd4 = 0;
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
txd->txd8 = 0;
}
}
mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
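/* Editor's note (illustrative sketch, not part of the driver): the scratch
 * ("free queue") ring built above is a singly linked chain in bus-address
 * space: descriptor i's txd2 holds the DMA address of descriptor i + 1, and
 * fq_head/fq_tail give the hardware the two ends of that chain. The helper
 * below restates the link arithmetic from the loop.
 */
static inline dma_addr_t example_fq_link(dma_addr_t ring_base, int i,
					 u32 txd_size)
{
	/* bus address of descriptor i + 1, i.e. the value stored in txd2 */
	return ring_base + (dma_addr_t)(i + 1) * txd_size;
}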
static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
return ring->dma + (desc - ring->phys);
}
static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
void *txd, u32 txd_size)
{
int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
}
}
if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB) {
struct sk_buff *skb = tx_buf->data;
if (napi)
napi_consume_skb(skb, napi);
else
dev_kfree_skb_any(skb);
} else {
struct xdp_frame *xdpf = tx_buf->data;
if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
xdp_return_frame_rx_napi(xdpf);
else if (bq)
xdp_return_frame_bulk(xdpf, bq);
else
xdp_return_frame(xdpf);
}
}
tx_buf->flags = 0;
tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
size_t size, int idx)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len0, size);
} else {
if (idx & 1) {
txd->txd3 = mapped_addr;
txd->txd2 |= TX_DMA_PLEN1(size);
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len0, size);
}
}
}
static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *desc = txd;
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
FIELD_PREP(TX_DMA_PQID, info->qid);
if (info->last)
data |= TX_DMA_LS0;
WRITE_ONCE(desc->txd3, data);
data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
if (info->first) {
if (info->gso)
data |= TX_DMA_TSO;
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM;
/* vlan header offload */
if (info->vlan)
data |= TX_DMA_INS_VLAN | info->vlan_tci;
}
WRITE_ONCE(desc->txd4, data);
}
static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma_v2 *desc = txd;
struct mtk_eth *eth = mac->hw;
u32 data;
WRITE_ONCE(desc->txd1, info->addr);
data = TX_DMA_PLEN0(info->size);
if (info->last)
data |= TX_DMA_LS0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
data |= TX_DMA_PREP_ADDR64(info->addr);
WRITE_ONCE(desc->txd3, data);
/* set forward port */
switch (mac->id) {
case MTK_GMAC1_ID:
data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
case MTK_GMAC2_ID:
data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
case MTK_GMAC3_ID:
data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
break;
}
data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
WRITE_ONCE(desc->txd4, data);
data = 0;
if (info->first) {
if (info->gso)
data |= TX_DMA_TSO_V2;
/* tx checksum offload */
if (info->csum)
data |= TX_DMA_CHKSUM_V2;
if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
data |= TX_DMA_SPTAG_V3;
}
WRITE_ONCE(desc->txd5, data);
data = 0;
if (info->first && info->vlan)
data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
WRITE_ONCE(desc->txd6, data);
WRITE_ONCE(desc->txd7, 0);
WRITE_ONCE(desc->txd8, 0);
}
static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
struct mtk_tx_dma_desc_info *info)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
if (mtk_is_netsys_v2_or_greater(eth))
mtk_tx_set_dma_desc_v2(dev, txd, info);
else
mtk_tx_set_dma_desc_v1(dev, txd, info);
}
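/* Map the skb head and all fragments onto TX descriptors and kick the DMA
 * engine (QDMA CTX pointer, or the PDMA CTX index on MT7628). On a mapping
 * failure all descriptors claimed so far are unwound and -ENOMEM is
 * returned.
 */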
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
struct mtk_tx_dma_desc_info txd_info = {
.size = skb_headlen(skb),
.gso = gso,
.csum = skb->ip_summed == CHECKSUM_PARTIAL,
.vlan = skb_vlan_tag_present(skb),
.qid = skb_get_queue_mapping(skb),
.vlan_tci = skb_vlan_tag_get(skb),
.first = true,
.last = !skb_is_nonlinear(skb),
};
struct netdev_queue *txq;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
int i, n_desc = 1;
int queue = skb_get_queue_mapping(skb);
int k = 0;
txq = netdev_get_tx_queue(dev, queue);
itxd = ring->next_free;
itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
return -ENOMEM;
itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
mtk_tx_set_dma_desc(dev, itxd, &txd_info);
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->mac_id = mac->id;
setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
bool new_desc = true;
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
if (txd == ring->last_free)
goto err_dma;
n_desc++;
} else {
new_desc = false;
}
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
soc->txrx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
offset, txd_info.size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->mac_id = mac->id;
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
txd_info.size, k++);
frag_size -= txd_info.size;
offset += txd_info.size;
}
}
/* store skb to cleanup */
itx_buf->type = MTK_TYPE_SKB;
itx_buf->data = skb;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
txd_pdma->txd2 |= TX_DMA_LS1;
}
netdev_tx_sent_queue(txq, skb->len);
skb_tx_timestamp(skb);
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
atomic_sub(n_desc, &ring->free_count);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(txq) || !netdev_xmit_more())
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int next_idx;
next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
return 0;
err_dma:
do {
tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
itxd_pdma = qdma_to_pdma(ring, itxd);
} while (itxd != txd);
return -ENOMEM;
}
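/* Worst-case number of TX descriptors needed for @skb: with GSO each
 * fragment is split at dma_max_len, otherwise one descriptor per fragment
 * plus one for the linear area.
 */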
static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
int i, nfrags = 1;
skb_frag_t *frag;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
}
return nfrags;
}
static int mtk_queue_stopped(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
if (netif_queue_stopped(eth->netdev[i]))
return 1;
}
return 0;
}
static void mtk_wake_queue(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
netif_tx_wake_all_queues(eth->netdev[i]);
}
}
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
bool gso = false;
int tx_num;
/* normally we can rely on the stack not calling this more than once,
* however we have 2 queues running on the same ring so we need to lock
* the ring access
*/
	spin_lock(&eth->page_lock);
	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_tx_stop_all_queues(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
return NETDEV_TX_BUSY;
}
/* TSO: fill MSS info in tcp checksum field */
if (skb_is_gso(skb)) {
if (skb_cow_head(skb, 0)) {
netif_warn(eth, tx_err, dev,
"GSO expand head fail.\n");
goto drop;
}
if (skb_shinfo(skb)->gso_type &
(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
gso = true;
tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
}
}
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
netif_tx_stop_all_queues(dev);
	spin_unlock(&eth->page_lock);
return NETDEV_TX_OK;
drop:
	spin_unlock(&eth->page_lock);
stats->tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
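/* With HW LRO several RX rings are active; return the first ring whose next
 * descriptor has been completed by the hardware, or NULL if none is ready.
 * Without LRO only ring 0 is used.
 */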
static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
int i;
struct mtk_rx_ring *ring;
int idx;
if (!eth->hwlro)
		return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
struct mtk_rx_dma *rxd;
		ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
}
return NULL;
}
static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
struct mtk_rx_ring *ring;
int i;
if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
} else {
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
if (ring->calc_idx_update) {
ring->calc_idx_update = false;
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
}
}
}
}
static bool mtk_page_pool_enabled(struct mtk_eth *eth)
{
return mtk_is_netsys_v2_or_greater(eth);
}
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
struct xdp_rxq_info *xdp_q,
int id, int size)
{
struct page_pool_params pp_params = {
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = NUMA_NO_NODE,
.dev = eth->dma_dev,
.offset = MTK_PP_HEADROOM,
.max_len = MTK_PP_MAX_BUF_SIZE,
};
struct page_pool *pp;
int err;
pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
: DMA_FROM_DEVICE;
pp = page_pool_create(&pp_params);
if (IS_ERR(pp))
return pp;
	err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
if (err)
goto err_unregister_rxq;
return pp;
err_unregister_rxq:
xdp_rxq_info_unreg(xdp_q);
err_free_pp:
page_pool_destroy(pp);
return ERR_PTR(err);
}
static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
gfp_t gfp_mask)
{
struct page *page;
page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
if (!page)
return NULL;
*dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
return page_address(page);
}
static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
{
if (ring->page_pool)
page_pool_put_full_page(ring->page_pool,
virt_to_head_page(data), napi);
else
skb_free_frag(data);
}
static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
struct mtk_tx_dma_desc_info *txd_info,
struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
void *data, u16 headroom, int index, bool dma_map)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma *txd_pdma;
if (dma_map) { /* ndo_xdp_xmit */
txd_info->addr = dma_map_single(eth->dma_dev, data,
txd_info->size, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
return -ENOMEM;
tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
} else {
struct page *page = virt_to_head_page(data);
txd_info->addr = page_pool_get_dma_addr(page) +
sizeof(struct xdp_frame) + headroom;
dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
txd_info->size, DMA_BIDIRECTIONAL);
}
mtk_tx_set_dma_desc(dev, txd, txd_info);
tx_buf->mac_id = mac->id;
tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd_pdma = qdma_to_pdma(ring, txd);
setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
index);
return 0;
}
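/* Queue an xdp_frame (and any fragments) on the TX ring. @dma_map
 * distinguishes ndo_xdp_xmit frames, which must be mapped here, from XDP_TX
 * buffers that already live in the page pool and only need a device sync.
 */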
static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
struct net_device *dev, bool dma_map)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_tx_dma_desc_info txd_info = {
.size = xdpf->len,
.first = true,
.last = !xdp_frame_has_frags(xdpf),
.qid = mac->id,
};
int err, index = 0, n_desc = 1, nr_frags;
struct mtk_tx_buf *htx_buf, *tx_buf;
struct mtk_tx_dma *htxd, *txd;
void *data = xdpf->data;
	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
return -EBUSY;
nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
return -EBUSY;
	spin_lock(&eth->page_lock);
txd = ring->next_free;
if (txd == ring->last_free) {
		spin_unlock(&eth->page_lock);
return -ENOMEM;
}
htxd = txd;
tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
for (;;) {
err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
data, xdpf->headroom, index, dma_map);
if (err < 0)
goto unmap;
if (txd_info.last)
break;
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
if (txd == ring->last_free)
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
soc->txrx.txd_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = skb_frag_size(&sinfo->frags[index]);
txd_info.last = index + 1 == nr_frags;
txd_info.qid = mac->id;
data = skb_frag_address(&sinfo->frags[index]);
index++;
}
/* store xdpf for cleanup */
htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
if (index & 1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
txd_pdma->txd2 |= TX_DMA_LS1;
}
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
atomic_sub(n_desc, &ring->free_count);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
int idx;
idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
	spin_unlock(&eth->page_lock);
return 0;
unmap:
while (htxd != txd) {
tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
txd_pdma->txd2 = TX_DMA_DESP2_DEF;
}
htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
}
	spin_unlock(&eth->page_lock);
return err;
}
static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
struct xdp_frame **frames, u32 flags)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hw_stats = mac->hw_stats;
struct mtk_eth *eth = mac->hw;
int i, nxmit = 0;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
for (i = 0; i < num_frame; i++) {
if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
break;
nxmit++;
}
u64_stats_update_begin(&hw_stats->syncp);
hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
u64_stats_update_end(&hw_stats->syncp);
return nxmit;
}
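/* Run the attached XDP program on a received buffer and act on its verdict
 * (pass, redirect, transmit back or drop), updating the per-MAC XDP
 * counters accordingly.
 */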
static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
struct xdp_buff *xdp, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hw_stats = mac->hw_stats;
u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
struct bpf_prog *prog;
u32 act = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(eth->prog);
if (!prog)
goto out;
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
count = &hw_stats->xdp_stats.rx_xdp_pass;
goto update_stats;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
act = XDP_DROP;
break;
}
count = &hw_stats->xdp_stats.rx_xdp_redirect;
goto update_stats;
case XDP_TX: {
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
act = XDP_DROP;
break;
}
count = &hw_stats->xdp_stats.rx_xdp_tx;
goto update_stats;
}
default:
bpf_warn_invalid_xdp_action(dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, prog, act);
fallthrough;
case XDP_DROP:
break;
}
page_pool_put_full_page(ring->page_pool,
virt_to_head_page(xdp->data), true);
update_stats:
u64_stats_update_begin(&hw_stats->syncp);
*count = *count + 1;
u64_stats_update_end(&hw_stats->syncp);
out:
rcu_read_unlock();
return act;
}
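/* NAPI RX poll: for every completed descriptor allocate a replacement
 * buffer, run XDP and/or build an skb from the old one, refill the
 * descriptor, and finally advance the CPU index register in one batch.
 */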
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u64 addr64 = 0;
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
u32 hash, reason;
int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
		/* find out which MAC the packet came from; values start at 1 */
if (mtk_is_netsys_v2_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
case PSE_GDM1_PORT:
case PSE_GDM2_PORT:
mac = val - 1;
break;
case PSE_GDM3_PORT:
mac = MTK_GMAC3_ID;
break;
default:
break;
}
} else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
!(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
}
if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
!eth->netdev[mac]))
goto release_desc;
netdev = eth->netdev[mac];
		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
/* alloc new buffer */
if (ring->page_pool) {
struct page *page = virt_to_head_page(data);
struct xdp_buff xdp;
u32 ret;
new_data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr,
GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
}
dma_sync_single_for_cpu(eth->dma_dev,
page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
pktlen, page_pool_get_dma_dir(ring->page_pool));
xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
false);
xdp_buff_clear_frags_flag(&xdp);
ret = mtk_xdp_run(eth, ring, &xdp, netdev);
if (ret == XDP_REDIRECT)
xdp_flush = true;
if (ret != XDP_PASS)
goto skip_rx;
skb = build_skb(data, PAGE_SIZE);
if (unlikely(!skb)) {
page_pool_put_full_page(ring->page_pool,
page, true);
netdev->stats.rx_dropped++;
goto skip_rx;
}
skb_reserve(skb, xdp.data - xdp.data_hard_start);
skb_put(skb, xdp.data_end - xdp.data);
skb_mark_for_recycle(skb);
} else {
if (ring->frag_size <= PAGE_SIZE)
new_data = napi_alloc_frag(ring->frag_size);
else
new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
if (unlikely(!new_data)) {
netdev->stats.rx_dropped++;
goto release_desc;
}
dma_addr = dma_map_single(eth->dma_dev,
new_data + NET_SKB_PAD + eth->ip_align,
ring->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev,
dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
ring->buf_size, DMA_FROM_DEVICE);
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
netdev->stats.rx_dropped++;
skb_free_frag(data);
goto skip_rx;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
skb_put(skb, pktlen);
}
skb->dev = netdev;
bytes += skb->len;
if (mtk_is_netsys_v2_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd3;
} else {
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
if (hash != MTK_RXD4_FOE_ENTRY)
skb_set_hash(skb, jhash_1word(hash, 0),
PKT_HASH_TYPE_L4);
rxdcsum = &trxd.rxd4;
}
if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
netdev_uses_dsa(netdev)) {
unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
				skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
}
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
mtk_ppe_check_skb(eth->ppe[0], skb, hash);
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
done++;
}
rx_done:
if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
mtk_update_rx_cpu_idx(eth);
}
eth->rx_packets += done;
eth->rx_bytes += bytes;
dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
&dim_sample);
	net_dim(&eth->rx_dim, dim_sample);
if (xdp_flush)
xdp_do_flush_map();
return done;
}
struct mtk_poll_state {
struct netdev_queue *txq;
unsigned int total;
unsigned int done;
unsigned int bytes;
};
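/* Account one completed skb. Completions are batched per TX queue so that
 * netdev_tx_completed_queue() (BQL) is only called when the queue changes
 * or at the end of the poll.
 */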
static void
mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
struct sk_buff *skb)
{
struct netdev_queue *txq;
struct net_device *dev;
unsigned int bytes = skb->len;
state->total++;
eth->tx_packets++;
eth->tx_bytes += bytes;
dev = eth->netdev[mac];
if (!dev)
return;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
if (state->txq == txq) {
state->done++;
state->bytes += bytes;
return;
}
if (state->txq)
netdev_tx_completed_queue(state->txq, state->done, state->bytes);
state->txq = txq;
state->done = 1;
state->bytes = bytes;
}
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
struct mtk_poll_state *state)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
struct xdp_frame_bulk bq;
struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
eth->soc->txrx.txd_size);
if (!tx_buf->data)
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB)
mtk_poll_tx_done(eth, state, tx_buf->mac_id,
tx_buf->data);
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
struct mtk_poll_state *state)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_buf *tx_buf;
struct xdp_frame_bulk bq;
struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
if (!tx_buf->data)
break;
if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
if (tx_buf->type == MTK_TYPE_SKB)
mtk_poll_tx_done(eth, state, 0, tx_buf->data);
budget--;
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
return budget;
}
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
struct dim_sample dim_sample = {};
struct mtk_poll_state state = {};
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
budget = mtk_poll_tx_qdma(eth, budget, &state);
else
budget = mtk_poll_tx_pdma(eth, budget, &state);
if (state.txq)
netdev_tx_completed_queue(state.txq, state.done, state.bytes);
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
	net_dim(&eth->tx_dim, dim_sample);
if (mtk_queue_stopped(eth) &&
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
return state.total;
}
static void mtk_handle_status_irq(struct mtk_eth *eth)
{
u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
mtk_stats_update(eth);
mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
MTK_INT_STATUS2);
}
}
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
mtk_r32(eth, reg_map->tx_irq_status),
mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
return tx_done;
}
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
do {
int rx_done;
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
mtk_r32(eth, reg_map->pdma.irq_status),
mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
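/* Allocate and initialise the TX ring: descriptor memory (SRAM or coherent
 * DMA), the shadow PDMA ring on non-QDMA SoCs, and the QDMA per-queue
 * scheduler and threshold registers.
 */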
static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
int i, sz = soc->txrx.txd_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
ring_size = MTK_DMA_SIZE;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
ring->dma = eth->sram_base + ring_size * sz;
ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
} else {
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
}
if (!ring->dma)
goto no_tx_mem;
for (i = 0; i < ring_size; i++) {
int next = (i + 1) % ring_size;
u32 next_ptr = ring->phys + next * sz;
txd = ring->dma + i * sz;
txd->txd2 = next_ptr;
txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
txd->txd4 = 0;
if (mtk_is_netsys_v2_or_greater(eth)) {
txd->txd5 = 0;
txd->txd6 = 0;
txd->txd7 = 0;
txd->txd8 = 0;
}
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
for (i = 0; i < ring_size; i++) {
ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring->dma_pdma[i].txd4 = 0;
}
}
ring->dma_size = ring_size;
atomic_set(&ring->free_count, ring_size - 2);
ring->next_free = ring->dma;
ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
ring->phys + ((ring_size - 1) * sz),
soc->reg_map->qdma.crx_ptr);
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
val = MTK_QTX_SCH_MIN_RATE_EN |
/* minimum: 10 Mbps */
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
if (mtk_is_netsys_v1(eth))
val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
ofs += MTK_QTX_OFFSET;
}
val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
if (mtk_is_netsys_v2_or_greater(eth))
mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
no_tx_mem:
return -ENOMEM;
}
static void mtk_tx_clean(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
	struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
if (ring->buf) {
for (i = 0; i < ring->dma_size; i++)
mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * soc->txrx.txd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * soc->txrx.txd_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
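/* Allocate an RX ring and pre-fill every descriptor with a receive buffer,
 * either from the page pool (XDP-capable SoCs) or from page fragments
 * mapped with dma_map_single().
 */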
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size, tx_ring_size;
int i;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
tx_ring_size = MTK_QDMA_RING_SIZE;
else
tx_ring_size = MTK_DMA_SIZE;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
		ring = &eth->rx_ring_qdma;
} else {
		ring = &eth->rx_ring[ring_no];
}
if (rx_flag == MTK_RX_FLAGS_HWLRO) {
rx_data_len = MTK_MAX_LRO_RX_LENGTH;
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
rx_dma_size = MTK_DMA_SIZE;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
if (mtk_page_pool_enabled(eth)) {
struct page_pool *pp;
pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
rx_dma_size);
if (IS_ERR(pp))
return PTR_ERR(pp);
ring->page_pool = pp;
}
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
rx_dma_size * eth->soc->txrx.rxd_size,
&ring->phys, GFP_KERNEL);
} else {
		struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
eth->soc->txrx.txd_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
eth->soc->txrx.txd_size * (ring_no + 1);
}
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
struct mtk_rx_dma_v2 *rxd;
dma_addr_t dma_addr;
void *data;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
if (!data)
return -ENOMEM;
} else {
if (ring->frag_size <= PAGE_SIZE)
data = netdev_alloc_frag(ring->frag_size);
else
data = mtk_max_lro_buf_alloc(GFP_KERNEL);
if (!data)
return -ENOMEM;
dma_addr = dma_map_single(eth->dma_dev,
data + NET_SKB_PAD + eth->ip_align,
ring->buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dma_dev,
dma_addr))) {
skb_free_frag(data);
return -ENOMEM;
}
}
rxd->rxd1 = (unsigned int)dma_addr;
ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
rxd->rxd3 = 0;
rxd->rxd4 = 0;
if (mtk_is_netsys_v2_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
rxd->rxd8 = 0;
}
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
if (rx_flag == MTK_RX_FLAGS_QDMA)
ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
ring_no * MTK_QRX_OFFSET;
else
ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
if (rx_flag == MTK_RX_FLAGS_QDMA) {
mtk_w32(eth, ring->phys,
reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, rx_dma_size,
reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
reg_map->qdma.rst_idx);
} else {
mtk_w32(eth, ring->phys,
reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, rx_dma_size,
reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
reg_map->pdma.rst_idx);
}
mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
{
u64 addr64 = 0;
int i;
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
struct mtk_rx_dma *rxd;
if (!ring->data[i])
continue;
rxd = ring->dma + i * eth->soc->txrx.rxd_size;
if (!rxd->rxd1)
continue;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
ring->buf_size, DMA_FROM_DEVICE);
mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
}
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
ring->dma_size * eth->soc->txrx.rxd_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->page_pool) {
if (xdp_rxq_info_is_reg(&ring->xdp_q))
xdp_rxq_info_unreg(&ring->xdp_q);
page_pool_destroy(ring->page_pool);
ring->page_pool = NULL;
}
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
int i;
u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
	/* set LRO rings to auto-learn mode */
ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
/* validate LRO ring */
ring_ctrl_dw2 |= MTK_RING_VLD;
/* set AGE timer (unit: 20us) */
ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
/* set max AGG timer (unit: 20us) */
ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
/* set max LRO AGG count */
ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
}
/* IPv4 checksum update enable */
lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
/* switch priority comparison to packet count mode */
lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
/* bandwidth threshold setting */
mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
/* auto-learn score delta setting */
mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
/* set refresh timer for altering flows to 1 sec. (unit: 20us) */
mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
MTK_PDMA_LRO_ALT_REFRESH_TIMER);
/* set HW LRO mode & the max aggregation count for rx packets */
lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
/* the minimal remaining room of SDL0 in RXD for lro aggregation */
lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
/* enable HW LRO */
lro_ctrl_dw0 |= MTK_LRO_EN;
mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
return 0;
}
static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
int i;
u32 val;
/* relinquish lro rings, flush aggregated packets */
mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
	/* wait for the relinquishment to complete */
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
if (val & MTK_LRO_RING_RELINQUISH_DONE) {
msleep(20);
continue;
}
break;
}
/* invalidate lro rings */
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
/* disable HW LRO */
mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}
static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
u32 reg_val;
reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
/* invalidate the IP setting */
mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
/* validate the IP setting */
mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}
static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
u32 reg_val;
reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
/* invalidate the IP setting */
mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}
static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
int cnt = 0;
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (mac->hwlro_ip[i])
cnt++;
}
return cnt;
}
static int mtk_hwlro_add_ipaddr(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int hwlro_idx;
if ((fsp->flow_type != TCP_V4_FLOW) ||
(!fsp->h_u.tcp_ip4_spec.ip4dst) ||
(fsp->location > 1))
return -EINVAL;
mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
return 0;
}
static int mtk_hwlro_del_ipaddr(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int hwlro_idx;
if (fsp->location > 1)
return -EINVAL;
mac->hwlro_ip[fsp->location] = 0;
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
return 0;
}
static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int i, hwlro_idx;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
mac->hwlro_ip[i] = 0;
hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
}
mac->hwlro_ip_cnt = 0;
}
static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
return -EINVAL;
	/* only the TCP destination IPv4 address is meaningful; other fields are ignored */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
fsp->m_u.tcp_ip4_spec.ip4dst = 0;
fsp->h_u.tcp_ip4_spec.ip4src = 0;
fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
fsp->h_u.tcp_ip4_spec.psrc = 0;
fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
fsp->h_u.tcp_ip4_spec.pdst = 0;
fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
fsp->h_u.tcp_ip4_spec.tos = 0;
fsp->m_u.tcp_ip4_spec.tos = 0xff;
return 0;
}
static int mtk_hwlro_get_fdir_all(struct net_device *dev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mtk_mac *mac = netdev_priv(dev);
int cnt = 0;
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
if (cnt == cmd->rule_cnt)
return -EMSGSIZE;
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
}
}
cmd->rule_cnt = cnt;
return 0;
}
static netdev_features_t mtk_fix_features(struct net_device *dev,
netdev_features_t features)
{
if (!(features & NETIF_F_LRO)) {
struct mtk_mac *mac = netdev_priv(dev);
int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
if (ip_cnt) {
netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
features |= NETIF_F_LRO;
}
}
return features;
}
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
netdev_features_t diff = dev->features ^ features;
if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
mtk_hwlro_netdev_disable(dev);
return 0;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
unsigned int reg;
int ret;
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
reg = eth->soc->reg_map->qdma.glo_cfg;
else
reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
5, MTK_DMA_BUSY_TIMEOUT_US);
if (ret)
dev_err(eth->dev, "DMA init timeout\n");
return ret;
}
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
/* QDMA needs scratch memory for internal reordering of the
* descriptors
*/
err = mtk_init_fq_dma(eth);
if (err)
return err;
}
err = mtk_tx_alloc(eth);
if (err)
return err;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
if (err)
return err;
}
err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
if (eth->hwlro) {
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
if (err)
return err;
}
err = mtk_hwlro_rx_init(eth);
if (err)
return err;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
/* Enable random early drop and set drop threshold
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
}
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
	mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
if (eth->hwlro) {
mtk_hwlro_rx_uninit(eth);
for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
kfree(eth->scratch_head);
}
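/* A set FE interrupt status bit here signals a fatal frame-engine condition
 * (FQ empty, RX FIFO under-/overflow, TSO errors); the TX timeout handler
 * uses this to decide whether to schedule the reset work.
 */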
static bool mtk_hw_reset_check(struct mtk_eth *eth)
{
u32 val = mtk_r32(eth, MTK_INT_STATUS2);
return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
(val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
(val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
}
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
	if (test_bit(MTK_RESETTING, &eth->state))
return;
if (!mtk_hw_reset_check(eth))
return;
eth->netdev[mac->id]->stats.tx_errors++;
netif_err(eth, tx_err, dev, "transmit timed out\n");
	schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
eth->rx_events++;
	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
}
static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
eth->tx_events++;
	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
}
return IRQ_HANDLED;
}
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
eth->soc->txrx.rx_irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
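/* Set up the DMA rings and enable the QDMA/PDMA global engines with the
 * burst-size and write-back settings appropriate for this SoC generation.
 */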
static int mtk_start_dma(struct mtk_eth *eth)
{
u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
err = mtk_dma_init(eth);
if (err) {
mtk_dma_free(eth);
return err;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
val = mtk_r32(eth, reg_map->qdma.glo_cfg);
val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
if (mtk_is_netsys_v2_or_greater(eth))
val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
else
val |= MTK_RX_BT_32DWORDS;
mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
reg_map->pdma.glo_cfg);
} else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
reg_map->pdma.glo_cfg);
}
return 0;
}
static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
{
int i;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
for (i = 0; i < MTK_MAX_DEVS; i++) {
u32 val;
if (!eth->netdev[i])
continue;
val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
		/* by default, set up the forward port to send frames to the PDMA */
val &= ~0xffff;
/* Enable RX checksum */
val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
val |= config;
if (netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
}
/* Reset and enable PSE */
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
}
static bool mtk_uses_dsa(struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
return netdev_uses_dsa(dev) &&
dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
#else
return false;
#endif
}
static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
{
struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
struct mtk_eth *eth = mac->hw;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ethtool_link_ksettings s;
struct net_device *ldev;
struct list_head *iter;
struct dsa_port *dp;
if (event != NETDEV_CHANGE)
return NOTIFY_DONE;
netdev_for_each_lower_dev(dev, ldev, iter) {
if (netdev_priv(ldev) == mac)
goto found;
}
return NOTIFY_DONE;
found:
if (!dsa_slave_dev_check(dev))
return NOTIFY_DONE;
if (__ethtool_get_link_ksettings(dev, &s))
return NOTIFY_DONE;
if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
return NOTIFY_DONE;
dp = dsa_port_from_netdev(dev);
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
if (mac->speed > 0 && mac->speed <= s.base.speed)
s.base.speed = 0;
mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
return NOTIFY_DONE;
}
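/* ndo_open: attach the PHY through phylink and, for the first netdev coming
 * up, start DMA, the PPEs, NAPI and interrupts; later opens only bump the
 * shared dma_refcnt.
 */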
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int i, err;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
err);
return err;
}
/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
const struct mtk_soc_data *soc = eth->soc;
u32 gdm_config;
int i;
err = mtk_start_dma(eth);
if (err) {
phylink_disconnect_phy(mac->phylink);
return err;
}
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
: MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
		refcount_set(&eth->dma_refcnt, 1);
}
else
		refcount_inc(&eth->dma_refcnt);
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
if (mtk_is_netsys_v2_or_greater(eth))
return 0;
if (mtk_uses_dsa(dev) && !eth->prog) {
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
struct metadata_dst *md_dst = eth->dsa_meta[i];
if (md_dst)
continue;
md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!md_dst)
return -ENOMEM;
md_dst->u.port_info.port_id = i;
eth->dsa_meta[i] = md_dst;
}
} else {
/* Hardware DSA untagging and VLAN RX offloading need to be
* disabled if at least one MAC does not use DSA.
*/
u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
val &= ~MTK_CDMP_STAG_EN;
mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
}
return 0;
}
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
u32 val;
int i;
/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
val = mtk_r32(eth, glo_cfg);
mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
glo_cfg);
	spin_unlock_bh(&eth->page_lock);
/* wait for dma stop */
for (i = 0; i < 10; i++) {
val = mtk_r32(eth, glo_cfg);
if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
msleep(20);
continue;
}
break;
}
}
static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
int i;
phylink_stop(mac->phylink);
netif_tx_disable(dev);
phylink_disconnect_phy(mac->phylink);
/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);
	cancel_work_sync(&eth->rx_dim.work);
	cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
mtk_dma_free(eth);
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_stop(eth->ppe[i]);
return 0;
}
static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct bpf_prog *old_prog;
bool need_update;
if (eth->hwlro) {
NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
return -EOPNOTSUPP;
}
if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
need_update = !!eth->prog != !!prog;
if (netif_running(dev) && need_update)
mtk_stop(dev);
old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
if (old_prog)
bpf_prog_put(old_prog);
if (netif_running(dev) && need_update)
return mtk_open(dev);
return 0;
}
static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
default:
return -EINVAL;
}
}
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
reset_bits,
reset_bits);
usleep_range(1000, 1100);
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
reset_bits,
~reset_bits);
mdelay(10);
}
static void mtk_clk_disable(struct mtk_eth *eth)
{
int clk;
for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
clk_disable_unprepare(eth->clks[clk]);
}
static int mtk_clk_enable(struct mtk_eth *eth)
{
int clk, ret;
for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
ret = clk_prepare_enable(eth->clks[clk]);
if (ret)
goto err_disable_clks;
}
return 0;
err_disable_clks:
while (--clk >= 0)
clk_disable_unprepare(eth->clks[clk]);
return ret;
}
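/* Net DIM worker: translate the selected RX moderation profile into the
 * PDMA (and, when QDMA is present, the QDMA) delay-interrupt register,
 * clamping the time to 20us units and the packet count to the field width.
 */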
static void mtk_dim_rx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);
val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_w32(eth, val, reg_map->qdma.delay_irq);
	spin_unlock_bh(&eth->dim_lock);
dim->state = DIM_START_MEASURE;
}
static void mtk_dim_tx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
dim->profile_ix);
	spin_lock_bh(&eth->dim_lock);
val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_w32(eth, val, reg_map->qdma.delay_irq);
	spin_unlock_bh(&eth->dim_lock);
dim->state = DIM_START_MEASURE;
}
static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
{
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
if (val <= 1518)
mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
else if (val <= 1536)
mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
else if (val <= 1552)
mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
else
mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
if (mcr_new != mcr_cur)
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
if (mtk_is_netsys_v3_or_greater(eth)) {
val = RSTCTRL_PPE0_V3;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
val |= RSTCTRL_PPE1_V3;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
val |= RSTCTRL_PPE2;
val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
} else if (mtk_is_netsys_v2_or_greater(eth)) {
val = RSTCTRL_PPE0_V2;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
val |= RSTCTRL_PPE1;
} else {
val = RSTCTRL_PPE0;
}
ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
if (mtk_is_netsys_v3_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x6f8ff);
else if (mtk_is_netsys_v2_or_greater(eth))
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
0x3ffffff);
}
static u32 mtk_hw_reset_read(struct mtk_eth *eth)
{
u32 val;
regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
return val;
}
static void mtk_hw_warm_reset(struct mtk_eth *eth)
{
u32 rst_mask, val;
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
RSTCTRL_FE);
if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
val & RSTCTRL_FE, 1, 1000)) {
dev_err(eth->dev, "warm reset failed\n");
mtk_hw_reset(eth);
return;
}
if (mtk_is_netsys_v3_or_greater(eth)) {
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
rst_mask |= RSTCTRL_PPE1_V3;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
rst_mask |= RSTCTRL_PPE2;
rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
} else if (mtk_is_netsys_v2_or_greater(eth)) {
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
rst_mask |= RSTCTRL_PPE1;
} else {
rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
}
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
udelay(1);
val = mtk_hw_reset_read(eth);
if (!(val & rst_mask))
dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
val, rst_mask);
rst_mask |= RSTCTRL_FE;
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
udelay(1);
val = mtk_hw_reset_read(eth);
if (val & rst_mask)
dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
val, rst_mask);
}
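/* Heuristic DMA hang detection: sample WDMA, QDMA and ADMA state and only
 * report a hang once the same condition has been seen on three consecutive
 * polls.
 */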
static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
bool oq_hang, cdm1_busy, adma_busy;
bool wtx_busy, cdm_full, oq_free;
u32 wdidx, val, gdm1_fc, gdm2_fc;
bool qfsm_hang, qfwd_hang;
bool ret = false;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return false;
/* WDMA sanity checks */
wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
!(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
!(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
if (++eth->reset.wdma_hang_count > 2) {
eth->reset.wdma_hang_count = 0;
ret = true;
}
goto out;
}
/* QDMA sanity checks */
qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
if (qfsm_hang && qfwd_hang &&
((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
(gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
if (++eth->reset.qdma_hang_count > 2) {
eth->reset.qdma_hang_count = 0;
ret = true;
}
goto out;
}
/* ADMA sanity checks */
oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
!(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
if (oq_hang && cdm1_busy && adma_busy) {
if (++eth->reset.adma_hang_count > 2) {
eth->reset.adma_hang_count = 0;
ret = true;
}
goto out;
}
eth->reset.wdma_hang_count = 0;
eth->reset.qdma_hang_count = 0;
eth->reset.adma_hang_count = 0;
out:
eth->reset.wdidx = wdidx;
return ret;
}
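/* Periodic monitor: checks for DMA hangs, kicks the pending reset work when
 * one is detected (unless a reset is already in progress) and reschedules
 * itself every MTK_DMA_MONITOR_TIMEOUT.
 */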
static void mtk_hw_reset_monitor_work(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
reset.monitor_work);
if (test_bit(MTK_RESETTING, ð->state))
goto out;
/* DMA stuck checks */
if (mtk_hw_check_dma_hang(eth))
schedule_work(ð->pending_work);
out:
schedule_delayed_work(ð->reset.monitor_work,
MTK_DMA_MONITOR_TIMEOUT);
}
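/* Hardware bring-up, used both at probe time and for warm resets: enable
 * runtime PM and clocks on first init, (warm-)reset the core, then program
 * MAC defaults, CDM special-tag handling, interrupt grouping and the PSE
 * thresholds appropriate for the SoC generation.
 */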
static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state))
return 0;
if (!reset) {
pm_runtime_enable(eth->dev);
pm_runtime_get_sync(eth->dev);
ret = mtk_clk_enable(eth);
if (ret)
goto err_disable_pm;
}
if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
dev_err(eth->dev, "MAC reset failed!\n");
goto err_disable_pm;
}
/* set interrupt delays based on current Net DIM sample */
mtk_dim_rx(ð->rx_dim.work);
mtk_dim_tx(ð->tx_dim.work);
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
return 0;
}
msleep(100);
if (reset)
mtk_hw_warm_reset(eth);
else
mtk_hw_reset(eth);
if (mtk_is_netsys_v2_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
}
if (eth->pctl) {
/* Set GE2 driving and slew rate */
regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
/* set GE2 TDSEL */
regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
/* set GE2 TUNE */
regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
}
/* Set link down as the default for each GMAC. Each MCR is set up with
 * the appropriate value when mtk_mac_config() is invoked.
 */
for (i = 0; i < MTK_MAX_DEVS; i++) {
struct net_device *dev = eth->netdev[i];
if (!dev)
continue;
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
mtk_set_mcr_max_rx(netdev_priv(dev),
dev->mtu + MTK_RX_ETH_HLEN);
}
/* Instruct the CDM to parse the MTK special tag coming from the CPU,
 * which also works for untagged packets.
 */
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
if (mtk_is_netsys_v1(eth)) {
val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
}
/* set interrupt delays based on current Net DIM sample */
mtk_dim_rx(ð->rx_dim.work);
mtk_dim_tx(ð->tx_dim.work);
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
/* PSE should not drop port1, port8 and port9 packets */
mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
/* GDM and CDM Threshold */
mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
/* Disable GDM1 RX CRC stripping */
mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
/* The PSE GDM3 MIB counters have incorrect hardware default values,
 * so read-clear them beforehand to prevent ethtool from reporting
 * wrong MIB values.
 */
for (i = 0; i < 0x80; i += 0x4)
mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
} else if (!mtk_is_netsys_v1(eth)) {
/* PSE should not drop port8 and port9 packets from WDMA Tx */
mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
/* PSE should drop packets to port 8/9 on WDMA Rx ring full */
mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
/* PSE Free Queue Flow Control */
mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
/* PSE config input queue threshold */
mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
/* PSE config output queue threshold */
mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
/* GDM and CDM Threshold */
mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
}
return 0;
err_disable_pm:
if (!reset) {
pm_runtime_put_sync(eth->dev);
pm_runtime_disable(eth->dev);
}
return ret;
}
static int mtk_hw_deinit(struct mtk_eth *eth)
{
if (!test_and_clear_bit(MTK_HW_INIT, ð->state))
return 0;
mtk_clk_disable(eth);
pm_runtime_put_sync(eth->dev);
pm_runtime_disable(eth->dev);
return 0;
}
static void mtk_uninit(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
phylink_disconnect_phy(mac->phylink);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
}
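/* MTU changes are rejected while an XDP program is attached if the new
 * frame size would exceed the page_pool buffer size.
 */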
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
int length = new_mtu + MTK_RX_ETH_HLEN;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
if (rcu_access_pointer(eth->prog) &&
length > MTK_PP_MAX_BUF_SIZE) {
netdev_err(dev, "Invalid MTU for XDP mode\n");
return -EINVAL;
}
mtk_set_mcr_max_rx(mac, length);
dev->mtu = new_mtu;
return 0;
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return phylink_mii_ioctl(mac->phylink, ifr, cmd);
default:
break;
}
return -EOPNOTSUPP;
}
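/* Quiesce the hardware ahead of a frame engine reset: force the FE/PPE
 * ports link down, let the PPE flush its state, mask NETSYS interrupts and
 * drop the forced-link bits on the GMACs.
 */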
static void mtk_prepare_for_reset(struct mtk_eth *eth)
{
u32 val;
int i;
/* set FE PPE ports link down */
for (i = MTK_GMAC1_ID;
i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
i += 2) {
val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
}
/* adjust PPE configurations to prepare for reset */
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_prepare_reset(eth->ppe[i]);
/* disable NETSYS interrupts */
mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
/* force link down GMAC */
for (i = 0; i < 2; i++) {
val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
mtk_w32(eth, val, MTK_MAC_MCR(i));
}
}
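/* Full recovery path (scheduled e.g. by the DMA hang monitor): under RTNL,
 * stop all running netdevs, warm-reset the hardware via mtk_hw_init(eth,
 * true) and bring the interfaces back up again.
 */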
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
unsigned long restart = 0;
u32 val;
int i;
rtnl_lock();
set_bit(MTK_RESETTING, ð->state);
mtk_prepare_for_reset(eth);
mtk_wed_fe_reset();
/* Run the preliminary reset configuration again to avoid any possible
 * race during the FE reset, since it can run while the RTNL lock is
 * released.
 */
mtk_prepare_for_reset(eth);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
usleep_range(15000, 16000);
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i] || !test_bit(i, &restart))
continue;
if (mtk_open(eth->netdev[i])) {
netif_alert(eth, ifup, eth->netdev[i],
"Driver up/down cycle failed\n");
dev_close(eth->netdev[i]);
}
}
/* set FE PPE ports link up */
for (i = MTK_GMAC1_ID;
i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
i += 2) {
val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
}
clear_bit(MTK_RESETTING, ð->state);
mtk_wed_fe_reset_complete();
rtnl_unlock();
}
static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
free_netdev(eth->netdev[i]);
}
for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
if (!eth->dsa_meta[i])
break;
metadata_dst_free(eth->dsa_meta[i]);
}
return 0;
}
static int mtk_unreg_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
mac = netdev_priv(eth->netdev[i]);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
unregister_netdevice_notifier(&mac->device_notifier);
unregister_netdev(eth->netdev[i]);
}
return 0;
}
static void mtk_sgmii_destroy(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAX_DEVS; i++)
mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
}
static int mtk_cleanup(struct mtk_eth *eth)
{
mtk_sgmii_destroy(eth);
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(ð->pending_work);
cancel_delayed_work_sync(ð->reset.monitor_work);
return 0;
}
static int mtk_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct mtk_mac *mac = netdev_priv(ndev);
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}
static int mtk_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd)
{
struct mtk_mac *mac = netdev_priv(ndev);
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct mtk_mac *mac = netdev_priv(dev);
strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
static u32 mtk_get_msglevel(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
return mac->hw->msg_enable;
}
static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
struct mtk_mac *mac = netdev_priv(dev);
mac->hw->msg_enable = value;
}
static int mtk_nway_reset(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
if (!mac->phylink)
return -ENOTSUPP;
return phylink_ethtool_nway_reset(mac->phylink);
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_STATS: {
struct mtk_mac *mac = netdev_priv(dev);
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
if (mtk_page_pool_enabled(mac->hw))
page_pool_ethtool_stats_get_strings(data);
break;
}
default:
break;
}
}
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS: {
int count = ARRAY_SIZE(mtk_ethtool_stats);
struct mtk_mac *mac = netdev_priv(dev);
if (mtk_page_pool_enabled(mac->hw))
count += page_pool_ethtool_stats_get_count();
return count;
}
default:
return -EOPNOTSUPP;
}
}
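/* Aggregate page_pool statistics across all RX rings into a single snapshot
 * for ethtool reporting.
 */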
static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
{
struct page_pool_stats stats = {};
int i;
for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
struct mtk_rx_ring *ring = ð->rx_ring[i];
if (!ring->page_pool)
continue;
page_pool_get_stats(ring->page_pool, &stats);
}
page_pool_ethtool_stats_get(data, &stats);
}
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_hw_stats *hwstats = mac->hw_stats;
u64 *data_src, *data_dst;
unsigned int start;
int i;
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return;
if (netif_running(dev) && netif_device_present(dev)) {
if (spin_trylock_bh(&hwstats->stats_lock)) {
mtk_stats_update_mac(mac);
spin_unlock_bh(&hwstats->stats_lock);
}
}
data_src = (u64 *)hwstats;
do {
data_dst = data;
start = u64_stats_fetch_begin(&hwstats->syncp);
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
if (mtk_page_pool_enabled(mac->hw))
mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
if (dev->hw_features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRULE:
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
default:
break;
}
return ret;
}
static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:
break;
}
return ret;
}
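/* DSA-aware TX queue selection: DSA ports map to dedicated queues starting
 * at index 3, everything else uses the queue matching the MAC id, falling
 * back to queue 0 when out of range.
 */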
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct mtk_mac *mac = netdev_priv(dev);
unsigned int queue = 0;
if (netdev_uses_dsa(dev))
queue = skb_get_queue_mapping(skb) + 3;
else
queue = mac->id;
if (queue >= dev->num_tx_queues)
queue = 0;
return queue;
}
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
.get_drvinfo = mtk_get_drvinfo,
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
.nway_reset = mtk_nway_reset,
.get_link = ethtool_op_get_link,
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
.get_rxnfc = mtk_get_rxnfc,
.set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
.ndo_uninit = mtk_uninit,
.ndo_open = mtk_open,
.ndo_stop = mtk_stop,
.ndo_start_xmit = mtk_start_xmit,
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = mtk_do_ioctl,
.ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
.ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
.ndo_bpf = mtk_xdp,
.ndo_xdp_xmit = mtk_xdp_xmit,
.ndo_select_queue = mtk_select_queue,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
phy_interface_t phy_mode;
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
int txqs = 1;
u32 val;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
return -EINVAL;
}
id = be32_to_cpup(_id);
if (id >= MTK_MAX_DEVS) {
dev_err(eth->dev, "%d is not a valid mac id\n", id);
return -EINVAL;
}
if (eth->netdev[id]) {
dev_err(eth->dev, "duplicate mac id found: %d\n", id);
return -EINVAL;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
txqs = MTK_QDMA_NUM_QUEUES;
eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
}
mac = netdev_priv(eth->netdev[id]);
eth->mac[id] = mac;
mac->id = id;
mac->hw = eth;
mac->of_node = np;
err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
if (err == -EPROBE_DEFER)
return err;
if (err) {
/* If the mac address is invalid, use random mac address */
eth_hw_addr_random(eth->netdev[id]);
dev_err(eth->dev, "generated random MAC address %pM\n",
eth->netdev[id]->dev_addr);
}
memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
mac->hwlro_ip_cnt = 0;
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
if (!mac->hw_stats) {
dev_err(eth->dev, "failed to allocate counter memory\n");
err = -ENOMEM;
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
u64_stats_init(&mac->hw_stats->syncp);
if (mtk_is_netsys_v3_or_greater(eth))
mac->hw_stats->reg_offset = id * 0x80;
else
mac->hw_stats->reg_offset = id * 0x40;
/* phylink create */
err = of_get_phy_mode(np, &phy_mode);
if (err) {
dev_err(eth->dev, "incorrect phy-mode\n");
goto free_netdev;
}
/* mac config is not set */
mac->interface = PHY_INTERFACE_MODE_NA;
mac->speed = SPEED_UNKNOWN;
mac->phylink_config.dev = ð->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
/* MT7623 gmac0 is now missing its speed-specific PLL configuration
 * in its .mac_config method (since state->speed is not valid there).
 * Disable support for MII, GMII and RGMII.
 */
if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
__set_bit(PHY_INTERFACE_MODE_MII,
mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_GMII,
mac->phylink_config.supported_interfaces);
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
}
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
__set_bit(PHY_INTERFACE_MODE_TRGMII,
mac->phylink_config.supported_interfaces);
/* TRGMII is not permitted on MT7621 if using DDR2 */
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
if (val & SYSCFG_DRAM_TYPE_DDR2)
__clear_bit(PHY_INTERFACE_MODE_TRGMII,
mac->phylink_config.supported_interfaces);
}
if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
__set_bit(PHY_INTERFACE_MODE_SGMII,
mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_1000BASEX,
mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_2500BASEX,
mac->phylink_config.supported_interfaces);
}
if (mtk_is_netsys_v3_or_greater(mac->hw) &&
MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
id == MTK_GMAC1_ID) {
mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
MAC_SYM_PAUSE |
MAC_10000FD;
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
}
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
goto free_netdev;
}
mac->phylink = phylink;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->hw_features = eth->soc->hw_features;
if (eth->hwlro)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
eth->netdev[id]->vlan_features = eth->soc->hw_features &
~NETIF_F_HW_VLAN_CTAG_TX;
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
mac->device_notifier.notifier_call = mtk_device_event;
register_netdevice_notifier(&mac->device_notifier);
}
if (mtk_page_pool_enabled(eth))
eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
return 0;
free_netdev:
free_netdev(eth->netdev[id]);
return err;
}
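/* Switch the device used for DMA mappings (e.g. when WED takes over DMA):
 * close all running interfaces, swap eth->dma_dev and reopen them so the
 * rings are reallocated against the new device.
 */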
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
{
struct net_device *dev, *tmp;
LIST_HEAD(dev_list);
int i;
rtnl_lock();
for (i = 0; i < MTK_MAX_DEVS; i++) {
dev = eth->netdev[i];
if (!dev || !(dev->flags & IFF_UP))
continue;
list_add_tail(&dev->close_list, &dev_list);
}
dev_close_many(&dev_list, false);
eth->dma_dev = dma_dev;
list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
list_del_init(&dev->close_list);
dev_open(dev, NULL);
}
rtnl_unlock();
}
static int mtk_sgmii_init(struct mtk_eth *eth)
{
struct device_node *np;
struct regmap *regmap;
u32 flags;
int i;
for (i = 0; i < MTK_MAX_DEVS; i++) {
np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
if (!np)
break;
regmap = syscon_node_to_regmap(np);
flags = 0;
if (of_property_read_bool(np, "mediatek,pnswap"))
flags |= MTK_SGMII_FLAG_PN_SWAP;
of_node_put(np);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
eth->soc->ana_rgc3,
flags);
}
return 0;
}
static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = NULL, *res_sram;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
if (!eth)
return -ENOMEM;
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
/* SRAM is actual memory and supports transparent access just like DRAM.
* Hence we don't require __iomem being set and don't need to use accessor
* functions to read from or write to SRAM.
*/
if (mtk_is_netsys_v3_or_greater(eth)) {
eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(eth->sram_base))
return PTR_ERR(eth->sram_base);
} else {
eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
if (err) {
dev_err(&pdev->dev, "Wrong DMA config\n");
return -EINVAL;
}
}
spin_lock_init(ð->page_lock);
spin_lock_init(ð->tx_irq_lock);
spin_lock_init(ð->rx_irq_lock);
spin_lock_init(ð->dim_lock);
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->rx_dim.work, mtk_dim_rx);
INIT_DELAYED_WORK(ð->reset.monitor_work, mtk_hw_reset_monitor_work);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(ð->tx_dim.work, mtk_dim_tx);
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
if (IS_ERR(eth->ethsys)) {
dev_err(&pdev->dev, "no ethsys regmap found\n");
return PTR_ERR(eth->ethsys);
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,infracfg");
if (IS_ERR(eth->infra)) {
dev_err(&pdev->dev, "no infracfg regmap found\n");
return PTR_ERR(eth->infra);
}
}
if (of_dma_is_coherent(pdev->dev.of_node)) {
struct regmap *cci;
cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"cci-control-port");
/* enable CPU/bus coherency */
if (!IS_ERR(cci))
regmap_write(cci, 0, 3);
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
err = mtk_sgmii_init(eth);
if (err)
return err;
}
if (eth->soc->required_pctl) {
eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,pctl");
if (IS_ERR(eth->pctl)) {
dev_err(&pdev->dev, "no pctl regmap found\n");
err = PTR_ERR(eth->pctl);
goto err_destroy_sgmii;
}
}
if (mtk_is_netsys_v2_or_greater(eth)) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
goto err_destroy_sgmii;
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
if (mtk_is_netsys_v3_or_greater(eth)) {
res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res_sram) {
err = -EINVAL;
goto err_destroy_sgmii;
}
eth->phy_scratch_ring = res_sram->start;
} else {
eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
}
}
}
if (eth->soc->offload_version) {
for (i = 0;; i++) {
struct device_node *np;
phys_addr_t wdma_phy;
u32 wdma_base;
if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
break;
np = of_parse_phandle(pdev->dev.of_node,
"mediatek,wed", i);
if (!np)
break;
wdma_base = eth->soc->reg_map->wdma_base[i];
wdma_phy = res ? res->start + wdma_base : 0;
mtk_wed_add_hw(np, eth, eth->base + wdma_base,
wdma_phy, i);
}
}
for (i = 0; i < 3; i++) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
eth->irq[i] = eth->irq[0];
else
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
err = -ENXIO;
goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_wed_exit;
}
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
err = -EINVAL;
goto err_wed_exit;
}
eth->clks[i] = NULL;
}
}
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(ð->pending_work, mtk_pending_work);
err = mtk_hw_init(eth, false);
if (err)
goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
for_each_child_of_node(pdev->dev.of_node, mac_np) {
if (!of_device_is_compatible(mac_np,
"mediatek,eth-mac"))
continue;
if (!of_device_is_available(mac_np))
continue;
err = mtk_add_mac(eth, mac_np);
if (err) {
of_node_put(mac_np);
goto err_deinit_hw;
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
err = devm_request_irq(eth->dev, eth->irq[0],
mtk_handle_irq, 0,
dev_name(eth->dev), eth);
} else {
err = devm_request_irq(eth->dev, eth->irq[1],
mtk_handle_irq_tx, 0,
dev_name(eth->dev), eth);
if (err)
goto err_free_dev;
err = devm_request_irq(eth->dev, eth->irq[2],
mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
}
if (err)
goto err_free_dev;
/* No MT7628/88 support yet */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
err = mtk_mdio_init(eth);
if (err)
goto err_free_dev;
}
if (eth->soc->offload_version) {
u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
for (i = 0; i < num_ppe; i++) {
u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
}
}
err = mtk_eth_offload_init(eth);
if (err)
goto err_deinit_ppe;
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
err = register_netdev(eth->netdev[i]);
if (err) {
dev_err(eth->dev, "error bringing up device\n");
goto err_deinit_ppe;
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
eth->netdev[i]->base_addr, eth->irq[0]);
}
/* We run 2 devices on the same DMA ring, so we need a dummy device
 * for NAPI to work.
 */
init_dummy_netdev(ð->dummy_dev);
netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx);
netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
schedule_delayed_work(ð->reset.monitor_work,
MTK_DMA_MONITOR_TIMEOUT);
return 0;
err_deinit_ppe:
mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
err_wed_exit:
mtk_wed_exit();
err_destroy_sgmii:
mtk_sgmii_destroy(eth);
return err;
}
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
struct mtk_mac *mac;
int i;
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
mac = netdev_priv(eth->netdev[i]);
phylink_disconnect_phy(mac->phylink);
}
mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(ð->tx_napi);
netif_napi_del(ð->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
return 0;
}
static const struct mtk_soc_data mt2701_data = {
.reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
static const struct mtk_soc_data mt7621_data = {
.reg_map = &mtk_reg_map,
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
static const struct mtk_soc_data mt7622_data = {
.reg_map = &mtk_reg_map,
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.offload_version = 2,
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
static const struct mtk_soc_data mt7623_data = {
.reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
static const struct mtk_soc_data mt7629_data = {
.reg_map = &mtk_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
.has_accounting = true,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
static const struct mtk_soc_data mt7981_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7981_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7981_CLKS_BITMAP,
.required_pctl = false,
.version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
};
static const struct mtk_soc_data mt7986_data = {
.reg_map = &mt7986_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7986_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7986_CLKS_BITMAP,
.required_pctl = false,
.version = 2,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
};
static const struct mtk_soc_data mt7988_data = {
.reg_map = &mt7988_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7988_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7988_CLKS_BITMAP,
.required_pctl = false,
.version = 3,
.offload_version = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma_v2),
.rxd_size = sizeof(struct mtk_rx_dma_v2),
.rx_irq_done_mask = MTK_RX_DONE_INT_V2,
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
},
};
static const struct mtk_soc_data rt5350_data = {
.reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
.txrx = {
.txd_size = sizeof(struct mtk_tx_dma),
.rxd_size = sizeof(struct mtk_rx_dma),
.rx_irq_done_mask = MTK_RX_DONE_INT,
.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
{ .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
{ .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
{ .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
.remove = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
.of_match_table = of_mtk_match,
},
};
module_platform_driver(mtk_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <[email protected]>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <[email protected]> */
#include <linux/seq_file.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_wed.h"
#include "mtk_wed_regs.h"
struct reg_dump {
const char *name;
u16 offset;
u8 type;
u8 base;
};
enum {
DUMP_TYPE_STRING,
DUMP_TYPE_WED,
DUMP_TYPE_WDMA,
DUMP_TYPE_WPDMA_TX,
DUMP_TYPE_WPDMA_TXFREE,
DUMP_TYPE_WPDMA_RX,
DUMP_TYPE_WED_RRO,
};
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
#define DUMP_RING(_prefix, _base, ...) \
{ _prefix " BASE", _base, __VA_ARGS__ }, \
{ _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
{ _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
#define DUMP_WED_RRO_RING(_base)DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
#define DUMP_WED_RRO_FDBK(_base)DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
static void
print_reg_val(struct seq_file *s, const char *name, u32 val)
{
seq_printf(s, "%-32s %08x\n", name, val);
}
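/* Walk a reg_dump table, dispatching to the right register accessor (WED,
 * WDMA or WPDMA) based on the entry type; STRING entries emit section
 * headers.
 */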
static void
dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
const struct reg_dump *regs, int n_regs)
{
const struct reg_dump *cur;
u32 val;
for (cur = regs; cur < ®s[n_regs]; cur++) {
switch (cur->type) {
case DUMP_TYPE_STRING:
seq_printf(s, "%s======== %s:\n",
cur > regs ? "\n" : "",
cur->name);
continue;
case DUMP_TYPE_WED_RRO:
case DUMP_TYPE_WED:
val = wed_r32(dev, cur->offset);
break;
case DUMP_TYPE_WDMA:
val = wdma_r32(dev, cur->offset);
break;
case DUMP_TYPE_WPDMA_TX:
val = wpdma_tx_r32(dev, cur->base, cur->offset);
break;
case DUMP_TYPE_WPDMA_TXFREE:
val = wpdma_txfree_r32(dev, cur->offset);
break;
case DUMP_TYPE_WPDMA_RX:
val = wpdma_rx_r32(dev, cur->base, cur->offset);
break;
}
print_reg_val(s, cur->name, val);
}
}
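/* debugfs "txinfo": dump the TX-side WED/WPDMA/WDMA rings and MIB counters
 * for the attached WLAN device, if any.
 */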
static int
wed_txinfo_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("WED TX"),
DUMP_WED(WED_TX_MIB(0)),
DUMP_WED_RING(WED_RING_TX(0)),
DUMP_WED(WED_TX_MIB(1)),
DUMP_WED_RING(WED_RING_TX(1)),
DUMP_STR("WPDMA TX"),
DUMP_WED(WED_WPDMA_TX_MIB(0)),
DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
DUMP_WED(WED_WPDMA_TX_MIB(1)),
DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
DUMP_STR("WPDMA TX"),
DUMP_WPDMA_TX_RING(0),
DUMP_WPDMA_TX_RING(1),
DUMP_STR("WED WDMA RX"),
DUMP_WED(WED_WDMA_RX_MIB(0)),
DUMP_WED_RING(WED_WDMA_RING_RX(0)),
DUMP_WED(WED_WDMA_RX_THRES(0)),
DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
DUMP_WED(WED_WDMA_RX_MIB(1)),
DUMP_WED_RING(WED_WDMA_RING_RX(1)),
DUMP_WED(WED_WDMA_RX_THRES(1)),
DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
DUMP_STR("WDMA RX"),
DUMP_WDMA(WDMA_GLO_CFG),
DUMP_WDMA_RING(WDMA_RING_RX(0)),
DUMP_WDMA_RING(WDMA_RING_RX(1)),
DUMP_STR("WED TX FREE"),
DUMP_WED(WED_RX_MIB(0)),
DUMP_WED_RING(WED_RING_RX(0)),
DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
DUMP_WED(WED_RX_MIB(1)),
DUMP_WED_RING(WED_RING_RX(1)),
DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
DUMP_STR("WED WPDMA TX FREE"),
DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (dev)
dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
static const struct reg_dump regs[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
DUMP_STR("WPDMA RX"),
DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
DUMP_STR("WED RX"),
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
DUMP_STR("WED RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
DUMP_WED(WED_RROQM_FDBK_IND_MIB),
DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
DUMP_STR("WED Route QM"),
DUMP_WED(WED_RTQM_R2H_MIB(0)),
DUMP_WED(WED_RTQM_R2Q_MIB(0)),
DUMP_WED(WED_RTQM_Q2H_MIB(0)),
DUMP_WED(WED_RTQM_R2H_MIB(1)),
DUMP_WED(WED_RTQM_R2Q_MIB(1)),
DUMP_WED(WED_RTQM_Q2H_MIB(1)),
DUMP_WED(WED_RTQM_Q2N_MIB),
DUMP_WED(WED_RTQM_Q2B_MIB),
DUMP_WED(WED_RTQM_PFDBK_MIB),
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
DUMP_WED_RING(WED_WDMA_RING_TX),
DUMP_STR("WDMA TX"),
DUMP_WDMA(WDMA_GLO_CFG),
DUMP_WDMA_RING(WDMA_RING_TX(0)),
DUMP_WDMA_RING(WDMA_RING_TX(1)),
DUMP_STR("WED RX BM"),
DUMP_WED(WED_RX_BM_BASE),
DUMP_WED(WED_RX_BM_RX_DMAD),
DUMP_WED(WED_RX_BM_PTR),
DUMP_WED(WED_RX_BM_TKID_MIB),
DUMP_WED(WED_RX_BM_BLEN),
DUMP_WED(WED_RX_BM_STS),
DUMP_WED(WED_RX_BM_INTF2),
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
};
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
if (dev)
dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
static int
mtk_wed_reg_set(void *data, u64 val)
{
struct mtk_wed_hw *hw = data;
regmap_write(hw->regs, hw->debugfs_reg, val);
return 0;
}
static int
mtk_wed_reg_get(void *data, u64 *val)
{
struct mtk_wed_hw *hw = data;
unsigned int regval;
int ret;
ret = regmap_read(hw->regs, hw->debugfs_reg, ®val);
if (ret)
return ret;
*val = regval;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
"0x%08llx\n");
void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
{
struct dentry *dir;
snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
dir = debugfs_create_dir(hw->dirname, NULL);
hw->debugfs_dir = dir;
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
if (hw->version != 1)
debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
&wed_rxinfo_fops);
}
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018-2019 MediaTek Inc.
/* A library for configuring path from GMAC/GDM to target PHY
*
* Author: Sean Wang <[email protected]>
*
*/
#include <linux/phy.h>
#include <linux/regmap.h>
#include "mtk_eth_soc.h"
struct mtk_eth_muxc {
const char *name;
int cap_bit;
int (*set_path)(struct mtk_eth *eth, u64 path);
};
static const char *mtk_eth_path_name(u64 path)
{
switch (path) {
case MTK_ETH_PATH_GMAC1_RGMII:
return "gmac1_rgmii";
case MTK_ETH_PATH_GMAC1_TRGMII:
return "gmac1_trgmii";
case MTK_ETH_PATH_GMAC1_SGMII:
return "gmac1_sgmii";
case MTK_ETH_PATH_GMAC2_RGMII:
return "gmac2_rgmii";
case MTK_ETH_PATH_GMAC2_SGMII:
return "gmac2_sgmii";
case MTK_ETH_PATH_GMAC2_GEPHY:
return "gmac2_gephy";
case MTK_ETH_PATH_GDM1_ESW:
return "gdm1_esw";
default:
return "unknown path";
}
}
static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
{
bool updated = true;
u32 mask, set, reg;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
mask = ~(u32)MTK_MUX_TO_ESW;
set = 0;
break;
case MTK_ETH_PATH_GDM1_ESW:
mask = ~(u32)MTK_MUX_TO_ESW;
set = MTK_MUX_TO_ESW;
break;
default:
updated = false;
break;
}
if (mtk_is_netsys_v3_or_greater(eth))
reg = MTK_MAC_MISC_V3;
else
reg = MTK_MAC_MISC;
if (updated)
mtk_m32(eth, mask, set, reg);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_GEPHY:
val = ~(u32)GEPHY_MAC_SEL;
break;
default:
updated = false;
break;
}
if (updated)
regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_SGMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) {
reg = USB_PHY_SWITCH_REG;
val = SGMII_QPHY_SEL;
mask = QPHY_SEL_MASK;
} else {
reg = INFRA_MISC2;
val = CO_QPHY_SEL;
mask = val;
}
break;
default:
updated = false;
break;
}
if (updated)
regmap_update_bits(eth->infra, reg, mask, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
val = SYSCFG0_SGMII_GMAC1;
break;
case MTK_ETH_PATH_GMAC2_SGMII:
val = SYSCFG0_SGMII_GMAC2;
break;
case MTK_ETH_PATH_GMAC1_RGMII:
case MTK_ETH_PATH_GMAC2_RGMII:
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= SYSCFG0_SGMII_MASK;
if ((path == MTK_ETH_PATH_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
(path == MTK_ETH_PATH_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
val = 0;
else
updated = false;
break;
default:
updated = false;
break;
}
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
{
unsigned int val = 0;
bool updated = true;
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
switch (path) {
case MTK_ETH_PATH_GMAC1_SGMII:
val |= SYSCFG0_SGMII_GMAC1_V2;
break;
case MTK_ETH_PATH_GMAC2_GEPHY:
val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
break;
case MTK_ETH_PATH_GMAC2_SGMII:
val |= SYSCFG0_SGMII_GMAC2_V2;
break;
default:
updated = false;
}
if (updated)
regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
SYSCFG0_SGMII_MASK, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
return 0;
}
static const struct mtk_eth_muxc mtk_eth_muxc[] = {
{
.name = "mux_gdm1_to_gmac1_esw",
.cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
.set_path = set_mux_gdm1_to_gmac1_esw,
}, {
.name = "mux_gmac2_gmac0_to_gephy",
.cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
.set_path = set_mux_gmac2_gmac0_to_gephy,
}, {
.name = "mux_u3_gmac2_to_qphy",
.cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
.set_path = set_mux_u3_gmac2_to_qphy,
}, {
.name = "mux_gmac1_gmac2_to_sgmii_rgmii",
.cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
.set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
}, {
.name = "mux_gmac12_to_gephy_sgmii",
.cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
.set_path = set_mux_gmac12_to_gephy_sgmii,
},
};
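/* Program every mux the SoC implements for the requested path; each mux
 * callback decides whether the path concerns it, and muxes not present on
 * the SoC are skipped with a debug message.
 */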
static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
{
int i, err = 0;
if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
dev_err(eth->dev, "path %s isn't support on the SoC\n",
mtk_eth_path_name(path));
return -EINVAL;
}
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
return 0;
/* Setup MUX in path fabric */
for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
err = mtk_eth_muxc[i].set_path(eth, path);
if (err)
goto out;
} else {
dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
mtk_eth_muxc[i].name);
}
}
out:
return err;
}
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
/* Setup proper MUXes along the path */
return mtk_eth_mux_setup(eth, path);
}
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path = 0;
if (mac_id == 1)
path = MTK_ETH_PATH_GMAC2_GEPHY;
if (!path)
return -EINVAL;
/* Setup proper MUXes along the path */
return mtk_eth_mux_setup(eth, path);
}
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
u64 path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
MTK_ETH_PATH_GMAC2_RGMII;
/* Setup proper MUXes along the path */
return mtk_eth_mux_setup(eth, path);
}
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_eth_path.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020 MediaTek Corporation
* Copyright (c) 2020 BayLibre SAS
*
* Author: Bartosz Golaszewski <[email protected]>
*/
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#define MTK_STAR_DRVNAME "mtk_star_emac"
#define MTK_STAR_WAIT_TIMEOUT 300
#define MTK_STAR_MAX_FRAME_SIZE 1514
#define MTK_STAR_SKB_ALIGNMENT 16
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
*/
#define MTK_STAR_IP_ALIGN 2
static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
/* PHY Control Register 0 */
#define MTK_STAR_REG_PHY_CTRL0 0x0000
#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
/* PHY Control Register 1 */
#define MTK_STAR_REG_PHY_CTRL1 0x0004
#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
/* MAC Configuration Register */
#define MTK_STAR_REG_MAC_CFG 0x0008
#define MTK_STAR_OFF_MAC_CFG_IPG 10
#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
/* Flow-Control Configuration Register */
#define MTK_STAR_REG_FC_CFG 0x000c
#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
/* ARL Configuration Register */
#define MTK_STAR_REG_ARL_CFG 0x0010
#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
/* MAC High and Low Bytes Registers */
#define MTK_STAR_REG_MY_MAC_H 0x0014
#define MTK_STAR_REG_MY_MAC_L 0x0018
/* Hash Table Control Register */
#define MTK_STAR_REG_HASH_CTRL 0x001c
#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
/* TX DMA Control Register */
#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
/* RX DMA Control Register */
#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
/* DMA Address Registers */
#define MTK_STAR_REG_TX_DPTR 0x003c
#define MTK_STAR_REG_RX_DPTR 0x0040
#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
/* Interrupt Status Register */
#define MTK_STAR_REG_INT_STS 0x0050
#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
/* Interrupt Mask Register */
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
/* Delay-Macro Register */
#define MTK_STAR_REG_TEST0 0x0058
#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
/* Extended Configuration Register */
#define MTK_STAR_REG_EXT_CFG 0x0060
#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
/* EthSys Configuration Register */
#define MTK_STAR_REG_SYS_CONF 0x0094
#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
/* MAC Clock Configuration Register */
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
#define MTK_STAR_REG_C_RXOKBYTE 0x0104
#define MTK_STAR_REG_C_RXRUNT 0x0108
#define MTK_STAR_REG_C_RXLONG 0x010c
#define MTK_STAR_REG_C_RXDROP 0x0110
#define MTK_STAR_REG_C_RXCRC 0x0114
#define MTK_STAR_REG_C_RXARLDROP 0x0118
#define MTK_STAR_REG_C_RXVLANDROP 0x011c
#define MTK_STAR_REG_C_RXCSERR 0x0120
#define MTK_STAR_REG_C_RXPAUSE 0x0124
#define MTK_STAR_REG_C_TXOKPKT 0x0128
#define MTK_STAR_REG_C_TXOKBYTE 0x012c
#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
#define MTK_STAR_REG_C_TXRTY 0x0134
#define MTK_STAR_REG_C_TXSKIP 0x0138
#define MTK_STAR_REG_C_TX_ARP 0x013c
#define MTK_STAR_REG_C_RX_RERR 0x01d8
#define MTK_STAR_REG_C_RX_UNI 0x01dc
#define MTK_STAR_REG_C_RX_MULTI 0x01e0
#define MTK_STAR_REG_C_RX_BROAD 0x01e4
#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
#define MTK_STAR_REG_C_TX_UNI 0x01ec
#define MTK_STAR_REG_C_TX_MULTI 0x01f0
#define MTK_STAR_REG_C_TX_BROAD 0x01f4
#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
* the flags differ slightly.
*/
struct mtk_star_ring_desc {
/* Contains both the status flags as well as packet length. */
u32 status;
u32 data_ptr;
u32 vtag;
u32 reserved;
};
#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
#define MTK_STAR_DESC_BIT_INT BIT(27)
#define MTK_STAR_DESC_BIT_LS BIT(28)
#define MTK_STAR_DESC_BIT_FS BIT(29)
#define MTK_STAR_DESC_BIT_EOR BIT(30)
#define MTK_STAR_DESC_BIT_COWN BIT(31)
/* Helper structure for storing data read from/written to descriptors in order
* to limit reads from/writes to DMA memory.
*/
struct mtk_star_ring_desc_data {
unsigned int len;
unsigned int flags;
dma_addr_t dma_addr;
struct sk_buff *skb;
};
#define MTK_STAR_RING_NUM_DESCS 512
#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
#define MTK_STAR_DMA_SIZE \
(MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
struct mtk_star_ring {
struct mtk_star_ring_desc *descs;
struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
unsigned int head;
unsigned int tail;
};
struct mtk_star_compat {
int (*set_interface_mode)(struct net_device *ndev);
unsigned char bit_clk_div;
};
struct mtk_star_priv {
struct net_device *ndev;
struct regmap *regs;
struct regmap *pericfg;
struct clk_bulk_data clks[MTK_STAR_NCLKS];
void *ring_base;
struct mtk_star_ring_desc *descs_base;
dma_addr_t dma_addr;
struct mtk_star_ring tx_ring;
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
struct phy_device *phydev;
unsigned int link;
int speed;
int duplex;
int pause;
bool rmii_rxc;
bool rx_inv;
bool tx_inv;
const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
struct rtnl_link_stats64 stats;
};
static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
{
return priv->ndev->dev.parent;
}
static const struct regmap_config mtk_star_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.disable_locking = true,
};
static void mtk_star_ring_init(struct mtk_star_ring *ring,
struct mtk_star_ring_desc *descs)
{
memset(ring, 0, sizeof(*ring));
ring->descs = descs;
ring->head = 0;
ring->tail = 0;
}
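/* Reclaim the descriptor at the ring tail once the MAC has handed ownership
 * back (COWN set); returns -1 while the hardware still owns it.
 */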
static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
struct mtk_star_ring_desc_data *desc_data)
{
struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
unsigned int status;
status = READ_ONCE(desc->status);
dma_rmb(); /* Make sure we read the status bits before checking them. */
if (!(status & MTK_STAR_DESC_BIT_COWN))
return -1;
desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
desc_data->dma_addr = ring->dma_addrs[ring->tail];
desc_data->skb = ring->skbs[ring->tail];
ring->dma_addrs[ring->tail] = 0;
ring->skbs[ring->tail] = NULL;
status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;
WRITE_ONCE(desc->data_ptr, 0);
WRITE_ONCE(desc->status, status);
ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;
return 0;
}
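/* Queue a buffer at the ring's head. Length and flags are written while the
 * descriptor is still CPU-owned; ownership is only handed to the hardware
 * by clearing COWN after a dma_wmb(), so the device never sees a partially
 * initialized descriptor.
 */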
static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
struct mtk_star_ring_desc_data *desc_data,
unsigned int flags)
{
struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
unsigned int status;
status = READ_ONCE(desc->status);
ring->skbs[ring->head] = desc_data->skb;
ring->dma_addrs[ring->head] = desc_data->dma_addr;
status |= desc_data->len;
if (flags)
status |= flags;
WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
WRITE_ONCE(desc->status, status);
status &= ~MTK_STAR_DESC_BIT_COWN;
/* Flush previous modifications before ownership change. */
dma_wmb();
WRITE_ONCE(desc->status, status);
ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
}
static void
mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
struct mtk_star_ring_desc_data *desc_data)
{
mtk_star_ring_push_head(ring, desc_data, 0);
}
static void
mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
struct mtk_star_ring_desc_data *desc_data)
{
static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
MTK_STAR_DESC_BIT_LS |
MTK_STAR_DESC_BIT_INT;
mtk_star_ring_push_head(ring, desc_data, flags);
}
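/* Number of free TX slots. One slot is always kept unused so a full ring
 * can be told apart from an empty one: with head == tail (empty ring) this
 * reports MTK_STAR_RING_NUM_DESCS - 1 available descriptors.
 */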
static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
u32 avail;
if (ring->tail > ring->head)
avail = ring->tail - ring->head - 1;
else
avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
return avail;
}
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
struct sk_buff *skb)
{
struct device *dev = mtk_star_get_dev(priv);
/* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
return dma_map_single(dev, skb_tail_pointer(skb) - 2,
skb_tailroom(skb), DMA_FROM_DEVICE);
}
static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
struct mtk_star_ring_desc_data *desc_data)
{
struct device *dev = mtk_star_get_dev(priv);
dma_unmap_single(dev, desc_data->dma_addr,
skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
}
static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
struct sk_buff *skb)
{
struct device *dev = mtk_star_get_dev(priv);
return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}
static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
struct mtk_star_ring_desc_data *desc_data)
{
struct device *dev = mtk_star_get_dev(priv);
	dma_unmap_single(dev, desc_data->dma_addr,
			 skb_headlen(desc_data->skb), DMA_TO_DEVICE);
}
static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
{
regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
bool rx, bool tx)
{
u32 value;
regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
if (tx)
value &= ~MTK_STAR_BIT_INT_STS_TNTC;
if (rx)
value &= ~MTK_STAR_BIT_INT_STS_FNRC;
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
}
static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
bool rx, bool tx)
{
u32 value;
regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
if (tx)
value |= MTK_STAR_BIT_INT_STS_TNTC;
if (rx)
value |= MTK_STAR_BIT_INT_STS_FNRC;
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
}
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
MTK_STAR_BIT_INT_STS_FNRC |
MTK_STAR_REG_INT_STS_MIB_CNT_TH;
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
}
static void mtk_star_intr_disable(struct mtk_star_priv *priv)
{
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
}
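/* Both rings live in one coherent allocation: the first
 * MTK_STAR_NUM_TX_DESCS descriptors form the TX ring, the remainder the RX
 * ring. Every descriptor starts out CPU-owned and the last descriptor of
 * each ring is tagged with the end-of-ring bit before the base and current
 * pointers are programmed into the controller.
 */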
static void mtk_star_dma_init(struct mtk_star_priv *priv)
{
struct mtk_star_ring_desc *desc;
unsigned int val;
int i;
priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
desc = &priv->descs_base[i];
memset(desc, 0, sizeof(*desc));
desc->status = MTK_STAR_DESC_BIT_COWN;
if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
(i == MTK_STAR_NUM_DESCS_TOTAL - 1))
desc->status |= MTK_STAR_DESC_BIT_EOR;
}
mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
mtk_star_ring_init(&priv->rx_ring,
priv->descs_base + MTK_STAR_NUM_TX_DESCS);
/* Set DMA pointers. */
val = (unsigned int)priv->dma_addr;
regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
}
static void mtk_star_dma_start(struct mtk_star_priv *priv)
{
regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
MTK_STAR_BIT_TX_DMA_CTRL_START);
regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
MTK_STAR_BIT_RX_DMA_CTRL_START);
}
static void mtk_star_dma_stop(struct mtk_star_priv *priv)
{
regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
MTK_STAR_BIT_TX_DMA_CTRL_STOP);
regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
MTK_STAR_BIT_RX_DMA_CTRL_STOP);
}
static void mtk_star_dma_disable(struct mtk_star_priv *priv)
{
int i;
mtk_star_dma_stop(priv);
/* Take back all descriptors. */
for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
}
static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
{
regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
}
static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
{
regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
}
static void mtk_star_set_mac_addr(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
const u8 *mac_addr = ndev->dev_addr;
unsigned int high, low;
high = mac_addr[0] << 8 | mac_addr[1] << 0;
low = mac_addr[2] << 24 | mac_addr[3] << 16 |
mac_addr[4] << 8 | mac_addr[5];
regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
}
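/* The MIB counters appear to be clear-on-read: reading every counter
 * register and discarding the result is enough to reset them once their
 * values have been folded into the software statistics.
 */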
static void mtk_star_reset_counters(struct mtk_star_priv *priv)
{
static const unsigned int counter_regs[] = {
MTK_STAR_REG_C_RXOKPKT,
MTK_STAR_REG_C_RXOKBYTE,
MTK_STAR_REG_C_RXRUNT,
MTK_STAR_REG_C_RXLONG,
MTK_STAR_REG_C_RXDROP,
MTK_STAR_REG_C_RXCRC,
MTK_STAR_REG_C_RXARLDROP,
MTK_STAR_REG_C_RXVLANDROP,
MTK_STAR_REG_C_RXCSERR,
MTK_STAR_REG_C_RXPAUSE,
MTK_STAR_REG_C_TXOKPKT,
MTK_STAR_REG_C_TXOKBYTE,
MTK_STAR_REG_C_TXPAUSECOL,
MTK_STAR_REG_C_TXRTY,
MTK_STAR_REG_C_TXSKIP,
MTK_STAR_REG_C_TX_ARP,
MTK_STAR_REG_C_RX_RERR,
MTK_STAR_REG_C_RX_UNI,
MTK_STAR_REG_C_RX_MULTI,
MTK_STAR_REG_C_RX_BROAD,
MTK_STAR_REG_C_RX_ALIGNERR,
MTK_STAR_REG_C_TX_UNI,
MTK_STAR_REG_C_TX_MULTI,
MTK_STAR_REG_C_TX_BROAD,
MTK_STAR_REG_C_TX_TIMEOUT,
MTK_STAR_REG_C_TX_LATECOL,
MTK_STAR_REG_C_RX_LENGTHERR,
MTK_STAR_REG_C_RX_TWIST,
};
unsigned int i, val;
for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
regmap_read(priv->regs, counter_regs[i], &val);
}
static void mtk_star_update_stat(struct mtk_star_priv *priv,
unsigned int reg, u64 *stat)
{
unsigned int val;
regmap_read(priv->regs, reg, &val);
*stat += val;
}
/* Try to get as many stats as possible from the internal registers instead
* of tracking them ourselves.
*/
static void mtk_star_update_stats(struct mtk_star_priv *priv)
{
struct rtnl_link_stats64 *stats = &priv->stats;
/* OK packets and bytes. */
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);
/* RX & TX multicast. */
mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);
/* Collisions. */
mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
&stats->collisions);
mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
&stats->collisions);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);
/* RX Errors. */
mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
&stats->rx_length_errors);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
&stats->rx_over_errors);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
&stats->rx_frame_errors);
mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
&stats->rx_fifo_errors);
/* Sum of the general RX error counter + all of the above. */
mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
stats->rx_errors += stats->rx_length_errors;
stats->rx_errors += stats->rx_over_errors;
stats->rx_errors += stats->rx_crc_errors;
stats->rx_errors += stats->rx_frame_errors;
stats->rx_errors += stats->rx_fifo_errors;
}
static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
{
uintptr_t tail, offset;
struct sk_buff *skb;
skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
if (!skb)
return NULL;
/* Align to 16 bytes. */
tail = (uintptr_t)skb_tail_pointer(skb);
if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
}
/* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
* extract the Ethernet header (14 bytes) so we need two more bytes.
*/
skb_reserve(skb, MTK_STAR_IP_ALIGN);
return skb;
}
static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc *desc;
struct sk_buff *skb;
dma_addr_t dma_addr;
int i;
for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
skb = mtk_star_alloc_skb(ndev);
if (!skb)
return -ENOMEM;
dma_addr = mtk_star_dma_map_rx(priv, skb);
if (dma_mapping_error(dev, dma_addr)) {
dev_kfree_skb(skb);
return -ENOMEM;
}
desc = &ring->descs[i];
desc->data_ptr = dma_addr;
desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
desc->status &= ~MTK_STAR_DESC_BIT_COWN;
ring->skbs[i] = skb;
ring->dma_addrs[i] = dma_addr;
}
return 0;
}
static void
mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
void (*unmap_func)(struct mtk_star_priv *,
struct mtk_star_ring_desc_data *))
{
struct mtk_star_ring_desc_data desc_data;
int i;
for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
if (!ring->dma_addrs[i])
continue;
desc_data.dma_addr = ring->dma_addrs[i];
desc_data.skb = ring->skbs[i];
unmap_func(priv, &desc_data);
dev_kfree_skb(desc_data.skb);
}
}
static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
{
struct mtk_star_ring *ring = &priv->rx_ring;
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
}
static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
{
struct mtk_star_ring *ring = &priv->tx_ring;
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
/**
 * mtk_star_handle_irq - Interrupt Handler.
 * @irq: interrupt number.
 * @data: pointer to a network interface device structure.
 *
 * Description: this is the driver interrupt service routine.
 * It mainly handles:
 * 1. TX complete interrupt for frame transmission.
 * 2. RX complete interrupt for frame reception.
 * 3. MAC Management Counter interrupt to avoid counter overflow.
 */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
struct net_device *ndev = data;
struct mtk_star_priv *priv = netdev_priv(ndev);
unsigned int intr_status = mtk_star_intr_ack_all(priv);
bool rx, tx;
rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
napi_schedule_prep(&priv->rx_napi);
tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
napi_schedule_prep(&priv->tx_napi);
if (rx || tx) {
spin_lock(&priv->lock);
		/* Mask the RX and TX complete interrupts. */
mtk_star_disable_dma_irq(priv, rx, tx);
spin_unlock(&priv->lock);
if (rx)
__napi_schedule(&priv->rx_napi);
if (tx)
__napi_schedule(&priv->tx_napi);
}
	/* The interrupt is triggered once any counter reaches 0x8000000. */
if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
mtk_star_update_stats(priv);
mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
}
/* Wait for the completion of any previous command - CMD_START bit must be
* cleared by hardware.
*/
static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
{
unsigned int val;
return regmap_read_poll_timeout_atomic(priv->regs,
MTK_STAR_REG_HASH_CTRL, val,
!(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
10, MTK_STAR_WAIT_TIMEOUT);
}
static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
{
unsigned int val;
int ret;
/* Wait for BIST_DONE bit. */
ret = regmap_read_poll_timeout_atomic(priv->regs,
MTK_STAR_REG_HASH_CTRL, val,
val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
10, MTK_STAR_WAIT_TIMEOUT);
if (ret)
return ret;
/* Check the BIST_OK bit. */
if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
MTK_STAR_BIT_HASH_CTRL_BIST_OK))
return -EIO;
return 0;
}
static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
unsigned int hash_addr)
{
unsigned int val;
int ret;
ret = mtk_star_hash_wait_cmd_start(priv);
if (ret)
return ret;
val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
return mtk_star_hash_wait_ok(priv);
}
static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
{
int ret;
ret = mtk_star_hash_wait_cmd_start(priv);
if (ret)
return ret;
regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
MTK_STAR_BIT_HASH_CTRL_BIST_EN);
regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
return mtk_star_hash_wait_ok(priv);
}
static void mtk_star_phy_config(struct mtk_star_priv *priv)
{
unsigned int val;
if (priv->speed == SPEED_1000)
val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
else if (priv->speed == SPEED_100)
val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
else
val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
if (priv->pause) {
val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
static void mtk_star_adjust_link(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct phy_device *phydev = priv->phydev;
bool new_state = false;
if (phydev->link) {
if (!priv->link) {
priv->link = phydev->link;
new_state = true;
}
if (priv->speed != phydev->speed) {
priv->speed = phydev->speed;
new_state = true;
}
if (priv->pause != phydev->pause) {
priv->pause = phydev->pause;
new_state = true;
}
} else {
if (priv->link) {
priv->link = phydev->link;
new_state = true;
}
}
if (new_state) {
if (phydev->link)
mtk_star_phy_config(priv);
phy_print_status(ndev->phydev);
}
}
static void mtk_star_init_config(struct mtk_star_priv *priv)
{
unsigned int val;
val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
MTK_STAR_BIT_EXT_MDC_MODE |
MTK_STAR_BIT_SWC_MII_MODE);
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
unsigned int val;
int ret;
mtk_star_nic_disable_pd(priv);
mtk_star_intr_disable(priv);
mtk_star_dma_stop(priv);
mtk_star_set_mac_addr(ndev);
/* Configure the MAC */
val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
val <<= MTK_STAR_OFF_MAC_CFG_IPG;
val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
/* Enable Hash Table BIST and reset it */
ret = mtk_star_reset_hash_table(priv);
if (ret)
return ret;
/* Setup the hashing algorithm */
regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
MTK_STAR_BIT_ARL_CFG_HASH_ALG |
MTK_STAR_BIT_ARL_CFG_MISC_MODE);
/* Don't strip VLAN tags */
regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
/* Setup DMA */
mtk_star_dma_init(priv);
ret = mtk_star_prepare_rx_skbs(ndev);
if (ret)
goto err_out;
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
napi_enable(&priv->tx_napi);
napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
/* Connect to and start PHY */
priv->phydev = of_phy_connect(ndev, priv->phy_node,
mtk_star_adjust_link, 0, priv->phy_intf);
if (!priv->phydev) {
netdev_err(ndev, "failed to connect to PHY\n");
ret = -ENODEV;
goto err_free_irq;
}
mtk_star_dma_start(priv);
phy_start(priv->phydev);
netif_start_queue(ndev);
return 0;
err_free_irq:
napi_disable(&priv->rx_napi);
napi_disable(&priv->tx_napi);
free_irq(ndev->irq, ndev);
err_free_skbs:
mtk_star_free_rx_skbs(priv);
err_out:
return ret;
}
static void mtk_star_disable(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
napi_disable(&priv->tx_napi);
napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
phy_stop(priv->phydev);
phy_disconnect(priv->phydev);
free_irq(ndev->irq, ndev);
mtk_star_free_rx_skbs(priv);
mtk_star_free_tx_skbs(priv);
}
static int mtk_star_netdev_open(struct net_device *ndev)
{
return mtk_star_enable(ndev);
}
static int mtk_star_netdev_stop(struct net_device *ndev)
{
mtk_star_disable(ndev);
return 0;
}
static int mtk_star_netdev_ioctl(struct net_device *ndev,
struct ifreq *req, int cmd)
{
if (!netif_running(ndev))
return -EINVAL;
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
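/* Stop the queue first, then re-check the ring: if a concurrent TX
 * completion freed enough descriptors in the meantime, the queue is woken
 * up again immediately instead of waiting for the next completion
 * interrupt.
 */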
static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
netif_stop_queue(priv->ndev);
/* Might race with mtk_star_tx_poll, check again */
smp_mb();
if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
return -EBUSY;
netif_start_queue(priv->ndev);
return 0;
}
static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
return 0;
return __mtk_star_maybe_stop_tx(priv, size);
}
static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
int nfrags = skb_shinfo(skb)->nr_frags;
if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
if (!netif_queue_stopped(ndev)) {
netif_stop_queue(ndev);
/* This is a hard error, log it. */
pr_err_ratelimited("Tx ring full when queue awake\n");
}
return NETDEV_TX_BUSY;
}
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
goto err_drop_packet;
desc_data.skb = skb;
desc_data.len = skb->len;
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
return NETDEV_TX_OK;
err_drop_packet:
dev_kfree_skb(skb);
ndev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
/* Returns the number of bytes sent or a negative number on the first
* descriptor owned by DMA.
*/
static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
{
struct mtk_star_ring *ring = &priv->tx_ring;
struct mtk_star_ring_desc_data desc_data;
int ret;
ret = mtk_star_ring_pop_tail(ring, &desc_data);
if (ret)
return ret;
mtk_star_dma_unmap_tx(priv, &desc_data);
ret = desc_data.skb->len;
dev_kfree_skb_irq(desc_data.skb);
return ret;
}
static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
tx_napi);
int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
unsigned int head = ring->head;
unsigned int entry = ring->tail;
while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
count++;
pkts_compl++;
bytes_compl += ret;
entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
if (unlikely(netif_queue_stopped(ndev)) &&
(mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
if (napi_complete(napi)) {
spin_lock(&priv->lock);
mtk_star_enable_dma_irq(priv, false, true);
spin_unlock(&priv->lock);
}
return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
mtk_star_update_stats(priv);
memcpy(stats, &priv->stats, sizeof(*stats));
}
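/* Program RX filtering. The multicast hash index is built from bit 0 of the
 * first address octet (shifted to bit 8) plus the last octet, e.g.
 * 01:00:5e:00:00:01 maps to hash bit 0x101, which keeps the index within a
 * 512-bit table.
 */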
static void mtk_star_set_rx_mode(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct netdev_hw_addr *hw_addr;
unsigned int hash_addr, i;
int ret;
if (ndev->flags & IFF_PROMISC) {
regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
MTK_STAR_BIT_ARL_CFG_MISC_MODE);
} else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
ndev->flags & IFF_ALLMULTI) {
for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
ret = mtk_star_set_hashbit(priv, i);
if (ret)
goto hash_fail;
}
} else {
/* Clear previous settings. */
ret = mtk_star_reset_hash_table(priv);
if (ret)
goto hash_fail;
netdev_for_each_mc_addr(hw_addr, ndev) {
hash_addr = (hw_addr->addr[0] & 0x01) << 8;
hash_addr += hw_addr->addr[5];
ret = mtk_star_set_hashbit(priv, hash_addr);
if (ret)
goto hash_fail;
}
}
return;
hash_fail:
if (ret == -ETIMEDOUT)
netdev_err(ndev, "setting hash bit timed out\n");
else
/* Should be -EIO */
netdev_err(ndev, "unable to set hash bit");
}
static const struct net_device_ops mtk_star_netdev_ops = {
.ndo_open = mtk_star_netdev_open,
.ndo_stop = mtk_star_netdev_stop,
.ndo_start_xmit = mtk_star_netdev_start_xmit,
.ndo_get_stats64 = mtk_star_netdev_get_stats64,
.ndo_set_rx_mode = mtk_star_set_rx_mode,
.ndo_eth_ioctl = mtk_star_netdev_ioctl,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static void mtk_star_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
/* TODO Add ethtool stats. */
static const struct ethtool_ops mtk_star_ethtool_ops = {
.get_drvinfo = mtk_star_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
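/* Receive up to @budget frames. On any allocation or DMA mapping failure
 * the current skb is recycled back into the ring, so the RX ring never runs
 * dry; the frame is simply dropped and counted in rx_dropped.
 */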
static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
int ret, count = 0;
while (count < budget) {
ret = mtk_star_ring_pop_tail(ring, &desc_data);
if (ret)
return -1;
curr_skb = desc_data.skb;
if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
(desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
/* Error packet -> drop and reuse skb. */
new_skb = curr_skb;
goto push_new_skb;
}
/* Prepare new skb before receiving the current one.
* Reuse the current skb if we fail at any point.
*/
new_skb = mtk_star_alloc_skb(ndev);
if (!new_skb) {
ndev->stats.rx_dropped++;
new_skb = curr_skb;
goto push_new_skb;
}
new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
if (dma_mapping_error(dev, new_dma_addr)) {
ndev->stats.rx_dropped++;
dev_kfree_skb(new_skb);
new_skb = curr_skb;
netdev_err(ndev, "DMA mapping error of RX descriptor\n");
goto push_new_skb;
}
/* We can't fail anymore at this point:
* it's safe to unmap the skb.
*/
mtk_star_dma_unmap_rx(priv, &desc_data);
skb_put(desc_data.skb, desc_data.len);
desc_data.skb->ip_summed = CHECKSUM_NONE;
desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
desc_data.skb->dev = ndev;
netif_receive_skb(desc_data.skb);
/* update dma_addr for new skb */
desc_data.dma_addr = new_dma_addr;
push_new_skb:
count++;
desc_data.len = skb_tailroom(new_skb);
desc_data.skb = new_skb;
mtk_star_ring_push_head_rx(ring, &desc_data);
}
mtk_star_dma_resume_rx(priv);
return count;
}
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
int work_done = 0;
priv = container_of(napi, struct mtk_star_priv, rx_napi);
work_done = mtk_star_rx(priv, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
spin_lock(&priv->lock);
mtk_star_enable_dma_irq(priv, true, false);
spin_unlock(&priv->lock);
}
return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
{
regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
MTK_STAR_BIT_PHY_CTRL0_RWOK);
}
static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
{
unsigned int val;
return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
10, MTK_STAR_WAIT_TIMEOUT);
}
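/* MDIO reads and writes share the same handshake: clear the RWOK flag,
 * program the register number (and data for writes) together with the
 * read/write command into PHY_CTRL0, then poll until the hardware sets RWOK
 * again to signal completion.
 */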
static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
{
struct mtk_star_priv *priv = mii->priv;
unsigned int val, data;
int ret;
mtk_star_mdio_rwok_clear(priv);
val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
ret = mtk_star_mdio_rwok_wait(priv);
if (ret)
return ret;
regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
return data;
}
static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
int regnum, u16 data)
{
struct mtk_star_priv *priv = mii->priv;
unsigned int val;
mtk_star_mdio_rwok_clear(priv);
val = data;
val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
val |= regnum;
val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
return mtk_star_mdio_rwok_wait(priv);
}
static int mtk_star_mdio_init(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct device *dev = mtk_star_get_dev(priv);
struct device_node *of_node, *mdio_node;
int ret;
of_node = dev->of_node;
mdio_node = of_get_child_by_name(of_node, "mdio");
if (!mdio_node)
return -ENODEV;
if (!of_device_is_available(mdio_node)) {
ret = -ENODEV;
goto out_put_node;
}
priv->mii = devm_mdiobus_alloc(dev);
if (!priv->mii) {
ret = -ENOMEM;
goto out_put_node;
}
snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
priv->mii->name = "mtk-mac-mdio";
priv->mii->parent = dev;
priv->mii->read = mtk_star_mdio_read;
priv->mii->write = mtk_star_mdio_write;
priv->mii->priv = priv;
ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
out_put_node:
of_node_put(mdio_node);
return ret;
}
static __maybe_unused int mtk_star_suspend(struct device *dev)
{
struct mtk_star_priv *priv;
struct net_device *ndev;
ndev = dev_get_drvdata(dev);
priv = netdev_priv(ndev);
if (netif_running(ndev))
mtk_star_disable(ndev);
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
return 0;
}
static __maybe_unused int mtk_star_resume(struct device *dev)
{
struct mtk_star_priv *priv;
struct net_device *ndev;
int ret;
ndev = dev_get_drvdata(dev);
priv = netdev_priv(ndev);
ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
if (ret)
return ret;
if (netif_running(ndev)) {
ret = mtk_star_enable(ndev);
if (ret)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
return ret;
}
static void mtk_star_clk_disable_unprepare(void *data)
{
struct mtk_star_priv *priv = data;
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
static int mtk_star_set_timing(struct mtk_star_priv *priv)
{
struct device *dev = mtk_star_get_dev(priv);
unsigned int delay_val = 0;
switch (priv->phy_intf) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RMII:
delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
break;
default:
dev_err(dev, "This interface not supported\n");
return -EINVAL;
}
return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
}
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
struct mtk_star_priv *priv;
struct net_device *ndev;
struct device *dev;
void __iomem *base;
int ret, i;
dev = &pdev->dev;
of_node = dev->of_node;
ndev = devm_alloc_etherdev(dev, sizeof(*priv));
if (!ndev)
return -ENOMEM;
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
ndev->min_mtu = ETH_ZLEN;
ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
spin_lock_init(&priv->lock);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
/* We won't be checking the return values of regmap read & write
* functions. They can only fail for mmio if there's a clock attached
* to regmap which is not the case here.
*/
priv->regs = devm_regmap_init_mmio(dev, base,
&mtk_star_regmap_config);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
"mediatek,pericfg");
if (IS_ERR(priv->pericfg)) {
dev_err(dev, "Failed to lookup the PERICFG syscon\n");
return PTR_ERR(priv->pericfg);
}
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0)
return ndev->irq;
for (i = 0; i < MTK_STAR_NCLKS; i++)
priv->clks[i].id = mtk_star_clk_names[i];
ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev,
mtk_star_clk_disable_unprepare, priv);
if (ret)
return ret;
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
} else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
}
priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
if (!priv->phy_node) {
dev_err(dev, "failed to retrieve the phy handle from device tree\n");
return -ENODEV;
}
priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
if (priv->compat_data->set_interface_mode) {
ret = priv->compat_data->set_interface_mode(ndev);
if (ret) {
dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
return -EINVAL;
}
}
ret = mtk_star_set_timing(priv);
if (ret) {
dev_err(dev, "Failed to set timing, err = %d\n", ret);
return -EINVAL;
}
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "unsupported DMA mask\n");
return ret;
}
priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
&priv->dma_addr,
GFP_KERNEL | GFP_DMA);
if (!priv->ring_base)
return -ENOMEM;
mtk_star_nic_disable_pd(priv);
mtk_star_init_config(priv);
ret = mtk_star_mdio_init(ndev);
if (ret)
return ret;
ret = platform_get_ethdev_address(dev, ndev);
if (ret || !is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
#ifdef CONFIG_OF
static int mt8516_set_interface_mode(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct device *dev = mtk_star_get_dev(priv);
	unsigned int intf_val, rmii_rxc;
	int ret;
switch (priv->phy_intf) {
case PHY_INTERFACE_MODE_MII:
intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
rmii_rxc = 0;
break;
case PHY_INTERFACE_MODE_RMII:
intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
break;
default:
dev_err(dev, "This interface not supported\n");
return -EINVAL;
}
ret = regmap_update_bits(priv->pericfg,
MTK_PERICFG_REG_NIC_CFG1_CON,
MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
rmii_rxc);
if (ret)
return ret;
return regmap_update_bits(priv->pericfg,
MTK_PERICFG_REG_NIC_CFG0_CON,
MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
intf_val);
}
static int mt8365_set_interface_mode(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct device *dev = mtk_star_get_dev(priv);
unsigned int intf_val;
switch (priv->phy_intf) {
case PHY_INTERFACE_MODE_MII:
intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
break;
case PHY_INTERFACE_MODE_RMII:
intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
break;
default:
dev_err(dev, "This interface not supported\n");
return -EINVAL;
}
return regmap_update_bits(priv->pericfg,
MTK_PERICFG_REG_NIC_CFG_CON_V2,
MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
intf_val);
}
static const struct mtk_star_compat mtk_star_mt8516_compat = {
.set_interface_mode = mt8516_set_interface_mode,
.bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
};
static const struct mtk_star_compat mtk_star_mt8365_compat = {
.set_interface_mode = mt8365_set_interface_mode,
.bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
};
static const struct of_device_id mtk_star_of_match[] = {
{ .compatible = "mediatek,mt8516-eth",
.data = &mtk_star_mt8516_compat },
{ .compatible = "mediatek,mt8518-eth",
.data = &mtk_star_mt8516_compat },
{ .compatible = "mediatek,mt8175-eth",
.data = &mtk_star_mt8516_compat },
{ .compatible = "mediatek,mt8365-eth",
.data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
#endif
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
mtk_star_suspend, mtk_star_resume);
static struct platform_driver mtk_star_driver = {
.driver = {
.name = MTK_STAR_DRVNAME,
.pm = &mtk_star_pm_ops,
.of_match_table = of_match_ptr(mtk_star_of_match),
},
.probe = mtk_star_probe,
};
module_platform_driver(mtk_star_driver);
MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_star_emac.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <[email protected]> */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
#define MTK_WED_PKT_SIZE 1900
#define MTK_WED_BUF_SIZE 2048
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
#define MTK_WED_RX_RING_SIZE 1536
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
#define MTK_WED_MAX_GROUP_SIZE 0x100
#define MTK_WED_VLD_GROUP_SIZE 0x40
#define MTK_WED_PER_GROUP_PKT 128
#define MTK_WED_FBUF_SIZE 128
#define MTK_WED_MIOD_CNT 16
#define MTK_WED_FB_CMD_CNT 1024
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);
struct mtk_wed_flow_block_priv {
struct mtk_wed_hw *hw;
struct net_device *dev;
};
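/* Register access helpers: wed_*() go through the WED regmap, wdma_*()
 * operate on the WDMA register window and wifi_*() access the WLAN MMIO
 * range at dev->wlan.base.
 */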
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}
static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, 0, mask);
}
static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, mask, 0);
}
static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}
static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
wdma_m32(dev, reg, 0, mask);
}
static void
wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
wdma_m32(dev, reg, mask, 0);
}
static u32
wifi_r32(struct mtk_wed_device *dev, u32 reg)
{
return readl(dev->wlan.base + reg);
}
static void
wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
writel(val, dev->wlan.base + reg);
}
static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
return wed_r32(dev, MTK_WED_RESET);
}
static u32
mtk_wdma_read_reset(struct mtk_wed_device *dev)
{
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
int i, ret;
wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
!(status & mask), 0, 10000);
if (ret)
dev_err(dev->hw->dev, "rx reset failed\n");
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
if (dev->rx_wdma[i].desc)
continue;
wdma_w32(dev,
MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}
return ret;
}
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
int i;
wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
!(status & mask), 0, 10000))
dev_err(dev->hw->dev, "tx reset failed\n");
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
wdma_w32(dev,
MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}
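/* Assert the requested reset bits and wait for the hardware to clear them
 * again; a reset that never completes only triggers a one-time warning.
 */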
static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
u32 status;
wed_w32(dev, MTK_WED_RESET, mask);
if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
!(status & mask), 0, 1000))
WARN_ON_ONCE(1);
}
static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}
static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
u8 state = MTK_WED_WO_STATE_DISABLE;
void __iomem *reg;
u32 val;
mtk_wdma_tx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_CHANGE_STATE, &state,
sizeof(state), false))
return;
if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
val == MTK_WED_WOIF_DISABLE_DONE,
100, MTK_WOCPU_TIMEOUT))
dev_err(dev->hw->dev, "failed to disable wed-wo\n");
reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
val = readl(reg);
switch (dev->hw->index) {
case 0:
val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
writel(val, reg);
val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
writel(val, reg);
break;
case 1:
val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
writel(val, reg);
val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
writel(val, reg);
break;
default:
break;
}
iounmap(reg);
}
void mtk_wed_fe_reset(void)
{
int i;
mutex_lock(&hw_lock);
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
struct mtk_wed_hw *hw = hw_list[i];
struct mtk_wed_device *dev;
int err;
if (!hw)
break;
dev = hw->wed_dev;
if (!dev || !dev->wlan.reset)
continue;
/* reset callback blocks until WLAN reset is completed */
err = dev->wlan.reset(dev);
if (err)
dev_err(dev->dev, "wlan reset failed: %d\n", err);
}
mutex_unlock(&hw_lock);
}
void mtk_wed_fe_reset_complete(void)
{
int i;
mutex_lock(&hw_lock);
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
struct mtk_wed_hw *hw = hw_list[i];
struct mtk_wed_device *dev;
if (!hw)
break;
dev = hw->wed_dev;
if (!dev || !dev->wlan.reset_complete)
continue;
dev->wlan.reset_complete(dev);
}
mutex_unlock(&hw_lock);
}
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw;
int i;
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
if (!hw)
return NULL;
if (!hw->wed_dev)
goto out;
if (hw->version == 1)
return NULL;
/* MT7986 WED devices do not have any pcie slot restrictions */
}
/* MT7986 PCIE or AXI */
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
hw = hw_list[i];
if (hw && !hw->wed_dev)
goto out;
}
return NULL;
out:
hw->wed_dev = dev;
return hw;
}
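/* The TX token buffers are carved out of full pages: each page holds
 * MTK_WED_BUF_PER_PAGE buffers of MTK_WED_BUF_SIZE bytes, and each WDMA
 * descriptor points buf0 at the WLAN-specific TX descriptor filled in by
 * the init_buf() callback and buf1 at the payload area right behind it.
 */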
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
void **page_list;
int token = dev->wlan.token_start;
int ring_size;
int n_pages;
int i, page_idx;
ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
dev->tx_buf_ring.size = ring_size;
dev->tx_buf_ring.pages = page_list;
desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
dev->tx_buf_ring.desc = desc;
dev->tx_buf_ring.desc_phys = desc_phys;
for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
void *buf;
int s;
page = __dev_alloc_pages(GFP_KERNEL, 0);
if (!page)
return -ENOMEM;
page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev->hw->dev, page_phys)) {
__free_page(page);
return -ENOMEM;
}
page_list[page_idx++] = page;
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
buf = page_to_virt(page);
buf_phys = page_phys;
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
u32 txd_size;
u32 ctrl;
txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
desc->buf0 = cpu_to_le32(buf_phys);
desc->buf1 = cpu_to_le32(buf_phys + txd_size);
if (dev->hw->version == 1)
ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
MTK_WED_BUF_SIZE - txd_size) |
MTK_WDMA_DESC_CTRL_LAST_SEG1;
else
ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
MTK_WED_BUF_SIZE - txd_size) |
MTK_WDMA_DESC_CTRL_LAST_SEG0;
desc->ctrl = cpu_to_le32(ctrl);
desc->info = 0;
desc++;
buf += MTK_WED_BUF_SIZE;
buf_phys += MTK_WED_BUF_SIZE;
}
dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
return 0;
}
static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
void **page_list = dev->tx_buf_ring.pages;
int page_idx;
int i;
if (!page_list)
return;
if (!desc)
goto free_pagelist;
for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
i += MTK_WED_BUF_PER_PAGE) {
void *page = page_list[page_idx++];
dma_addr_t buf_addr;
if (!page)
break;
buf_addr = le32_to_cpu(desc[i].buf0);
dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(page);
}
dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
desc, dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc;
dma_addr_t desc_phys;
dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
desc = dma_alloc_coherent(dev->hw->dev,
dev->wlan.rx_nbuf * sizeof(*desc),
&desc_phys, GFP_KERNEL);
if (!desc)
return -ENOMEM;
dev->rx_buf_ring.desc = desc;
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
return 0;
}
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
if (!desc)
return;
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
}
static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
}
static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
if (!ring->desc)
return;
dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
ring->desc, ring->desc_phys);
}
static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
mtk_wed_free_rx_buffer(dev);
mtk_wed_free_ring(dev, &dev->rro.ring);
}
static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
mtk_wed_free_ring(dev, &dev->tx_ring[i]);
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}
static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
if (dev->hw->version == 1)
mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
else
mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
wed_r32(dev, MTK_WED_EXT_INT_MASK);
}
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
if (enable) {
wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
wed_w32(dev, MTK_WED_TXP_DW1,
FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
} else {
wed_w32(dev, MTK_WED_TXP_DW1,
FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
}
}
#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
{
u32 val;
int i;
if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
return; /* queue is not configured by mt76 */
for (i = 0; i < 3; i++) {
u32 cur_idx;
cur_idx = wed_r32(dev,
MTK_WED_WPDMA_RING_RX_DATA(idx) +
MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
usleep_range(100000, 200000);
}
if (i == 3) {
dev_err(dev->hw->dev, "rx dma enable failed\n");
return;
}
val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
MTK_WFMDA_RX_DMA_EN;
wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
}
static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
wed_clr(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
}
mtk_wed_set_512_support(dev, false);
}
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
if (dev->hw->version == 1)
return;
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}
static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
mtk_wed_stop(dev);
mtk_wed_dma_disable(dev);
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
if (dev->hw->version == 1)
return;
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_EN |
MTK_WED_CTRL_WED_RX_BM_EN |
MTK_WED_CTRL_RX_RRO_QM_EN);
}
static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
struct mtk_wed_hw *hw = dev->hw;
mtk_wed_deinit(dev);
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
if (mtk_wed_get_rx_capa(dev)) {
if (hw->wed_wo)
mtk_wed_wo_reset(dev);
mtk_wed_free_rx_rings(dev);
if (hw->wed_wo)
mtk_wed_wo_deinit(hw);
}
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
wlan_node = dev->wlan.pci_dev->dev.of_node;
if (of_dma_is_coherent(wlan_node) && hw->hifsys)
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), BIT(hw->index));
}
if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
hw->eth->dma_dev != hw->eth->dev)
mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
memset(dev, 0, sizeof(*dev));
module_put(THIS_MODULE);
hw->wed_dev = NULL;
}
static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
mutex_lock(&hw_lock);
__mtk_wed_detach(dev);
mutex_unlock(&hw_lock);
}
#define PCIE_BASE_ADDR0 0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
switch (dev->wlan.bus_type) {
case MTK_WED_BUS_PCIE: {
struct device_node *np = dev->hw->eth->dev->of_node;
struct regmap *regs;
regs = syscon_regmap_lookup_by_phandle(np,
"mediatek,wed-pcie");
if (IS_ERR(regs))
break;
regmap_update_bits(regs, 0, BIT(0), BIT(0));
wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
		/* PCIe interrupt control: polarity/source selection */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
/* pcie interrupt status trigger register */
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
		/* polarity setting */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
break;
}
case MTK_WED_BUS_AXI:
wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
break;
default:
break;
}
}
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
} else {
mtk_wed_bus_init(dev);
wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
}
}
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
u32 mask, set;
mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_set_wpdma(dev);
mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
if (dev->hw->version == 1) {
u32 offset = dev->hw->index ? 0x04000400 : 0;
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
MTK_PCIE_BASE(dev->hw->index));
} else {
wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
wed_w32(dev, MTK_WED_WDMA_OFFSET0,
FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
MTK_WDMA_INT_STATUS) |
FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
MTK_WDMA_GLO_CFG));
wed_w32(dev, MTK_WED_WDMA_OFFSET1,
FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
MTK_WDMA_RING_TX(0)) |
FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
MTK_WDMA_RING_RX(0)));
}
}
static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
int size)
{
ring->desc = dma_alloc_coherent(dev->hw->dev,
size * sizeof(*ring->desc),
&ring->desc_phys, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
return 0;
}
#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
struct reserved_mem *rmem;
struct device_node *np;
int index;
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
return index;
np = of_parse_phandle(dev->hw->node, "memory-region", index);
if (!np)
return -ENODEV;
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
if (!rmem)
return -ENODEV;
dev->rro.miod_phys = rmem->base;
dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
MTK_WED_RRO_QUE_CNT);
}
static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
struct {
struct {
__le32 base;
__le32 cnt;
__le32 unit;
} ring[2];
__le32 wed;
u8 version;
} req = {
.ring[0] = {
.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
},
.ring[1] = {
.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
MTK_WED_MIOD_COUNT),
.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
.unit = cpu_to_le32(4),
},
};
return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_WED_CFG,
&req, sizeof(req), true);
}
static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
MTK_WED_MIOD_ENTRY_CNT >> 2));
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
wed_set(dev, MTK_WED_RROQM_RST_IDX,
MTK_WED_RROQM_RST_IDX_MIOD |
MTK_WED_RROQM_RST_IDX_FDBK);
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
}
static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
for (;;) {
usleep_range(100, 200);
if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
break;
}
/* configure RX_ROUTE_QM */
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
if (dev->init_done)
return;
dev->init_done = true;
mtk_wed_set_ext_int(dev, false);
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_TX_BM_TKID,
FIELD_PREP(MTK_WED_TX_BM_TKID_START,
dev->wlan.token_start) |
FIELD_PREP(MTK_WED_TX_BM_TKID_END,
dev->wlan.token_start +
dev->wlan.nbuf - 1));
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
MTK_WED_TX_BM_DYN_THR_HI);
} else {
wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
FIELD_PREP(MTK_WED_TX_BM_TKID_START,
dev->wlan.token_start) |
FIELD_PREP(MTK_WED_TX_BM_TKID_END,
dev->wlan.token_start +
dev->wlan.nbuf - 1));
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
MTK_WED_TX_BM_DYN_THR_HI_V2);
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
dev->tx_buf_ring.size / 128));
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
if (dev->hw->version == 1) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
} else {
wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
/* rx hw init */
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
mtk_wed_rx_buffer_hw_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}
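/* Reinitialize every descriptor in a ring: TX descriptors are marked
 * DMA_DONE, RX descriptors TO_HOST, buffer pointers and info cleared.
 */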
static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
void *head = (void *)ring->desc;
int i;
for (i = 0; i < size; i++) {
struct mtk_wdma_desc *desc;
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
if (tx)
desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
else
desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
desc->buf1 = 0;
desc->info = 0;
}
}
static u32
mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
return !!(wed_r32(dev, reg) & mask);
}
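/* Poll a WED status bit until it clears, sleeping ~15ms per iteration with
 * an overall 1.5s timeout.
 */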
static int
mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
int sleep = 15000;
int timeout = 100 * sleep;
u32 val;
return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
timeout, false, dev, reg, mask);
}
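/* RX datapath reset: put the WO MCU into SER reset state, quiesce and reset
 * the WPDMA RX driver, RRO QM, route QM, WDMA TX, WED RX DMA and RX buffer
 * manager, then re-enable WO, reset the RX rings and free the RX buffers.
 */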
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
u8 val = MTK_WED_WO_STATE_SER_RESET;
int i, ret;
ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_CHANGE_STATE, &val,
sizeof(val), true);
if (ret)
return ret;
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
} else {
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
}
/* reset rro qm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_RRO_QM_BUSY);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
} else {
wed_set(dev, MTK_WED_RROQM_RST_IDX,
MTK_WED_RROQM_RST_IDX_MIOD |
MTK_WED_RROQM_RST_IDX_FDBK);
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
}
/* reset route qm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
if (ret)
mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
else
wed_set(dev, MTK_WED_RTQM_GLO_CFG,
MTK_WED_RTQM_Q_RST);
/* reset tx wdma */
mtk_wdma_tx_reset(dev);
/* reset tx wdma drv */
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
/* reset wed rx dma */
ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_RX_DMA_BUSY);
wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
} else {
struct mtk_eth *eth = dev->hw->eth;
if (mtk_is_netsys_v2_or_greater(eth))
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
/* reset rx bm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_RX_BM_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
/* switch WO back to the enable state */
val = MTK_WED_WO_STATE_ENABLE;
ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
MTK_WED_WO_CMD_CHANGE_STATE, &val,
sizeof(val), true);
if (ret)
return ret;
/* wed_rx_ring_reset */
for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
if (!dev->rx_ring[i].desc)
continue;
mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
false);
}
mtk_wed_free_rx_buffer(dev);
return 0;
}
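/* TX-side DMA reset: reinitialize the TX rings, then quiesce and reset WED
 * TX DMA, WDMA RX, the TX free agent, the TX buffer manager and the WPDMA
 * drivers; on v2 hardware also run the RX reset path.
 */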
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
bool busy = false;
u32 val;
int i;
for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
if (!dev->tx_ring[i].desc)
continue;
mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
true);
}
/* 1. reset WED tx DMA */
wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
/* 2. reset WDMA rx DMA */
busy = !!mtk_wdma_rx_reset(dev);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
if (!busy)
busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
} else {
wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}
/* 3. reset WED TX free agent and TX buffer manager */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
break;
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
/* 4. reset WED WPDMA tx */
busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
if (!busy)
busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
} else {
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
MTK_WED_WPDMA_RESET_IDX_TX |
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
dev->init_done = false;
if (dev->hw->version == 1)
return;
if (!busy) {
wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
mtk_wed_rx_reset(dev);
}
static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
int size, u32 desc_size, bool tx)
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
ring->desc_size = desc_size;
ring->size = size;
mtk_wed_ring_reset(ring, size, tx);
return 0;
}
static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->rx_wdma))
return -EINVAL;
wdma = &dev->rx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
size);
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
size);
return 0;
}
static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->tx_wdma))
return -EINVAL;
wdma = &dev->tx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
size);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
if (reset)
mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
size);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
0);
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
0);
}
return 0;
}
static void
mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
u32 reason, u32 hash)
{
struct mtk_eth *eth = dev->hw->eth;
struct ethhdr *eh;
if (!skb)
return;
if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
return;
skb_set_mac_header(skb, 0);
eh = eth_hdr(skb);
skb->protocol = eh->h_proto;
mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
}
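/* Enable the WED interrupt agents and program the WDMA/WPDMA interrupt
 * triggers; on v2 hardware the per-ring TX, TX-free and RX done triggers
 * are taken from the bits advertised by the WLAN driver.
 */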
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
/* set WED control register bits */
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WDMA_INT_AGENT_EN |
MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
if (dev->hw->version == 1) {
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
MTK_WED_PCIE_INT_TRIGGER_STATUS);
wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
GENMASK(1, 0));
/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
dev->wlan.tx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
dev->wlan.tx_tbit[1]));
/* initial txfree interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
MTK_WED_WPDMA_INT_CTRL_RX0_EN |
MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
MTK_WED_WPDMA_INT_CTRL_RX1_EN |
MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
dev->wlan.rx_tbit[0]) |
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
dev->wlan.rx_tbit[1]));
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
dev->wdma_idx));
}
wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}
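/* Turn on the WED, WPDMA and WDMA DMA engines; on v2 hardware also enable
 * the WDMA TX driver, the WPDMA RX driver and check that the RX queues are
 * filled.
 */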
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_TX_DMA_EN |
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
if (dev->hw->version == 1) {
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
int i;
wed_set(dev, MTK_WED_WPDMA_CTRL,
MTK_WED_WPDMA_CTRL_SDL1_FIXED);
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_EN |
FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
0x2));
for (i = 0; i < MTK_WED_RX_QUEUES; i++)
mtk_wed_check_wfdma_rx_fill(dev, i);
}
}
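/* Bring the WED instance up: allocate RX buffers when RX offload is
 * supported, set up any missing WDMA RX rings, run the one-time hardware
 * init, configure interrupts and, on v2, push the RRO configuration to the
 * WO MCU before enabling the DMA engines.
 */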
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
int i;
if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
return;
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
mtk_wed_set_ext_int(dev, true);
if (dev->hw->version == 1) {
u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
dev->hw->index);
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
/* the driver sets MID ready, and only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_w32(dev, MTK_WED_EXT_INT_MASK2,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
if (mtk_wed_rro_cfg(dev))
return;
}
mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
mtk_wed_dma_enable(dev);
dev->running = true;
}
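/* Claim a WED instance for a WLAN device (called under the RCU read lock):
 * pick a free hw, allocate TX and, when supported, RRO resources, run the
 * early hardware init and, on v2, boot the WO MCU.
 */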
static int
mtk_wed_attach(struct mtk_wed_device *dev)
__releases(RCU)
{
struct mtk_wed_hw *hw;
struct device *device;
int ret = 0;
RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
"mtk_wed_attach without holding the RCU read lock");
if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
!try_module_get(THIS_MODULE))
ret = -ENODEV;
rcu_read_unlock();
if (ret)
return ret;
mutex_lock(&hw_lock);
hw = mtk_wed_assign(dev);
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
goto unlock;
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
? &dev->wlan.pci_dev->dev
: &dev->wlan.platform_dev->dev;
dev_info(device, "attaching wed device %d version %d\n",
hw->index, hw->version);
dev->hw = hw;
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
dev->version = hw->version;
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
mtk_eth_set_dma_device(hw->eth, hw->dev);
ret = mtk_wed_tx_buffer_alloc(dev);
if (ret)
goto out;
if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rro_alloc(dev);
if (ret)
goto out;
}
mtk_wed_hw_init_early(dev);
if (hw->version == 1) {
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
} else {
dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
ret = mtk_wed_wo_init(hw);
}
out:
if (ret) {
dev_err(dev->hw->dev, "failed to attach wed device\n");
__mtk_wed_detach(dev);
}
unlock:
mutex_unlock(&hw_lock);
return ret;
}
static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
bool reset)
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];
/*
 * Tx ring redirection:
 * Instead of configuring the WLAN PDMA TX ring directly, the DMA ring
 * allocated by the WLAN driver gets configured into the WED
 * MTK_WED_RING_TX(n) registers.
 *
 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
 * That ring gets filled with packets picked up from the WED TX ring and
 * from WDMA RX.
 */
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;
if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
sizeof(*ring->desc), true))
return -ENOMEM;
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
reset))
return -ENOMEM;
ring->reg_base = MTK_WED_RING_TX(idx);
ring->wpdma = regs;
/* WED -> WPDMA */
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
ring->desc_phys);
wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
MTK_WED_TX_RING_SIZE);
wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
return 0;
}
static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
int i, index = dev->hw->version == 1;
/*
* For txfree event handling, the same DMA ring is shared between WED
* and WLAN. The WLAN driver accesses the ring index registers through
* WED.
*/
ring->reg_base = MTK_WED_RING_RX(index);
ring->wpdma = regs;
for (i = 0; i < 12; i += 4) {
u32 val = readl(regs + i);
wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
}
return 0;
}
static int
mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
bool reset)
{
struct mtk_wed_ring *ring = &dev->rx_ring[idx];
if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
return -EINVAL;
if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
sizeof(*ring->desc), false))
return -ENOMEM;
if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
reset))
return -ENOMEM;
ring->reg_base = MTK_WED_RING_RX_DATA(idx);
ring->wpdma = regs;
ring->flags |= MTK_WED_RING_CONFIGURED;
/* WPDMA -> WED */
wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
ring->desc_phys);
wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
MTK_WED_RX_RING_SIZE);
return 0;
}
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
if (dev->hw->version == 1)
ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
else
ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
val &= ext_mask;
if (!dev->hw->num_flows)
val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
if (val && net_ratelimit())
pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
val = wed_r32(dev, MTK_WED_INT_STATUS);
val &= mask;
wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
return val;
}
static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
if (!dev->running)
return;
mtk_wed_set_ext_int(dev, !!mask);
wed_w32(dev, MTK_WED_INT_MASK, mask);
}
int mtk_wed_flow_add(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
int ret;
if (!hw || !hw->wed_dev)
return -ENODEV;
if (hw->num_flows) {
hw->num_flows++;
return 0;
}
mutex_lock(&hw_lock);
if (!hw->wed_dev) {
ret = -ENODEV;
goto out;
}
ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
if (!ret)
hw->num_flows++;
mtk_wed_set_ext_int(hw->wed_dev, true);
out:
mutex_unlock(&hw_lock);
return ret;
}
void mtk_wed_flow_remove(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
if (!hw)
return;
if (--hw->num_flows)
return;
mutex_lock(&hw_lock);
if (!hw->wed_dev)
goto out;
hw->wed_dev->wlan.offload_disable(hw->wed_dev);
mtk_wed_set_ext_int(hw->wed_dev, true);
out:
mutex_unlock(&hw_lock);
}
static int
mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
struct mtk_wed_hw *hw = priv->hw;
if (!tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
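/* Bind or unbind a flower offload block for this WED instance, tracking the
 * callback refcount so a shared block is only registered once.
 */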
static int
mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
struct flow_block_offload *f)
{
struct mtk_wed_flow_block_priv *priv;
static LIST_HEAD(block_cb_list);
struct flow_block_cb *block_cb;
struct mtk_eth *eth = hw->eth;
flow_setup_cb_t *cb;
if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
cb = mtk_wed_setup_tc_block_cb;
f->driver_block_list = &block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (block_cb) {
flow_block_cb_incref(block_cb);
return 0;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->hw = hw;
priv->dev = dev;
block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
if (IS_ERR(block_cb)) {
kfree(priv);
return PTR_ERR(block_cb);
}
flow_block_cb_incref(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &block_cb_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, dev);
if (!block_cb)
return -ENOENT;
if (!flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
}
return 0;
default:
return -EOPNOTSUPP;
}
}
static int
mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
enum tc_setup_type type, void *type_data)
{
struct mtk_wed_hw *hw = wed->hw;
if (hw->version < 2)
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_BLOCK:
case TC_SETUP_FT:
return mtk_wed_setup_tc_block(hw, dev, type_data);
default:
return -EOPNOTSUPP;
}
}
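/* Register the WED hardware instance described by @np: resolve its regmap,
 * IRQ and version, set up the mirror/hifsys syscons on v1 SoCs and publish
 * the ops table through mtk_soc_wed_ops.
 */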
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index)
{
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
.rx_ring_setup = mtk_wed_rx_ring_setup,
.txfree_ring_setup = mtk_wed_txfree_ring_setup,
.msg_update = mtk_wed_mcu_msg_update,
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
.reg_read = wed_r32,
.reg_write = wed_w32,
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
.setup_tc = mtk_wed_setup_tc,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
struct mtk_wed_hw *hw;
struct regmap *regs;
int irq;
if (!np)
return;
pdev = of_find_device_by_node(np);
if (!pdev)
goto err_of_node_put;
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err_put_device;
regs = syscon_regmap_lookup_by_phandle(np, NULL);
if (IS_ERR(regs))
goto err_put_device;
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
mutex_lock(&hw_lock);
if (WARN_ON(hw_list[index]))
goto unlock;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
goto unlock;
hw->node = np;
hw->regs = regs;
hw->eth = eth;
hw->dev = &pdev->dev;
hw->wdma_phy = wdma_phy;
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
if (hw->version == 1) {
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,pcie-mirror");
hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,hifsys");
if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
kfree(hw);
goto unlock;
}
if (!index) {
regmap_write(hw->mirror, 0, 0);
regmap_write(hw->mirror, 4, 0);
}
}
mtk_wed_hw_add_debugfs(hw);
hw_list[index] = hw;
mutex_unlock(&hw_lock);
return;
unlock:
mutex_unlock(&hw_lock);
err_put_device:
put_device(&pdev->dev);
err_of_node_put:
of_node_put(np);
}
void mtk_wed_exit(void)
{
int i;
rcu_assign_pointer(mtk_soc_wed_ops, NULL);
synchronize_rcu();
for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
struct mtk_wed_hw *hw;
hw = hw_list[i];
if (!hw)
continue;
hw_list[i] = NULL;
debugfs_remove(hw->debugfs_dir);
put_device(hw->dev);
of_node_put(hw->node);
kfree(hw);
}
}
| linux-master | drivers/net/ethernet/mediatek/mtk_wed.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2022 MediaTek Inc.
*
* Author: Lorenzo Bianconi <[email protected]>
* Sujuan Chen <[email protected]>
*/
#include <linux/firmware.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/mfd/syscon.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <asm/unaligned.h>
#include "mtk_wed_regs.h"
#include "mtk_wed_wo.h"
#include "mtk_wed.h"
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
return readl(wo->boot.addr + reg);
}
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
writel(val, wo->boot.addr + reg);
}
static struct sk_buff *
mtk_wed_mcu_msg_alloc(const void *data, int data_len)
{
int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
struct sk_buff *skb;
skb = alloc_skb(length, GFP_KERNEL);
if (!skb)
return NULL;
memset(skb->head, 0, length);
skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
if (data && data_len)
skb_put_data(skb, data, data_len);
return skb;
}
static struct sk_buff *
mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
{
if (!time_is_after_jiffies(expires))
return NULL;
wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
expires - jiffies);
return skb_dequeue(&wo->mcu.res_q);
}
void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
{
skb_queue_tail(&wo->mcu.res_q, skb);
wake_up(&wo->mcu.wait);
}
static void
mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
{
u32 count = get_unaligned_le32(skb->data);
struct mtk_wed_wo_rx_stats *stats;
int i;
if (count * sizeof(*stats) > skb->len - sizeof(u32))
return;
stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32));
for (i = 0 ; i < count ; i++)
wed->wlan.update_wo_rx_stats(wed, &stats[i]);
}
void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
struct sk_buff *skb)
{
struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
switch (hdr->cmd) {
case MTK_WED_WO_EVT_LOG_DUMP:
dev_notice(wo->hw->dev, "%s\n", skb->data);
break;
case MTK_WED_WO_EVT_PROFILING: {
struct mtk_wed_wo_log_info *info = (void *)skb->data;
u32 count = skb->len / sizeof(*info);
int i;
for (i = 0 ; i < count ; i++)
dev_notice(wo->hw->dev,
"SN:%u latency: total=%u, rro:%u, mod:%u\n",
le32_to_cpu(info[i].sn),
le32_to_cpu(info[i].total),
le32_to_cpu(info[i].rro),
le32_to_cpu(info[i].mod));
break;
}
case MTK_WED_WO_EVT_RXCNT_INFO:
mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
break;
default:
break;
}
dev_kfree_skb(skb);
}
static int
mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
int id, int cmd, u16 *wait_seq, bool wait_resp)
{
struct mtk_wed_mcu_hdr *hdr;
/* TODO: make it dynamic based on cmd */
wo->mcu.timeout = 20 * HZ;
hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
hdr->cmd = cmd;
hdr->length = cpu_to_le16(skb->len);
if (wait_resp && wait_seq) {
u16 seq = ++wo->mcu.seq;
if (!seq)
seq = ++wo->mcu.seq;
*wait_seq = seq;
hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
hdr->seq = cpu_to_le16(seq);
}
if (id == MTK_WED_MODULE_ID_WO)
hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
}
static int
mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
int cmd, int seq)
{
struct mtk_wed_mcu_hdr *hdr;
if (!skb) {
dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
return -ETIMEDOUT;
}
hdr = (struct mtk_wed_mcu_hdr *)skb->data;
if (le16_to_cpu(hdr->seq) != seq)
return -EAGAIN;
skb_pull(skb, sizeof(*hdr));
switch (cmd) {
case MTK_WED_WO_CMD_RXCNT_INFO:
mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
break;
default:
break;
}
return 0;
}
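/* Build an MCU message from @data, send it to the WO firmware and, when
 * @wait_resp is set, wait until the matching sequence number is answered or
 * the request times out.
 */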
int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
const void *data, int len, bool wait_resp)
{
unsigned long expires;
struct sk_buff *skb;
u16 seq;
int ret;
skb = mtk_wed_mcu_msg_alloc(data, len);
if (!skb)
return -ENOMEM;
mutex_lock(&wo->mcu.mutex);
ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
if (ret || !wait_resp)
goto unlock;
expires = jiffies + wo->mcu.timeout;
do {
skb = mtk_wed_mcu_get_response(wo, expires);
ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
unlock:
mutex_unlock(&wo->mcu.mutex);
return ret;
}
int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
int len)
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
if (dev->hw->version == 1)
return 0;
if (WARN_ON(!wo))
return -ENODEV;
return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
true);
}
static int
mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
int index;
index = of_property_match_string(wo->hw->node, "memory-region-names",
region->name);
if (index < 0)
return index;
np = of_parse_phandle(wo->hw->node, "memory-region", index);
if (!np)
return -ENODEV;
rmem = of_reserved_mem_lookup(np);
of_node_put(np);
if (!rmem)
return -ENODEV;
region->phy_addr = rmem->base;
region->size = rmem->size;
region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
return !region->addr ? -EINVAL : 0;
}
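/* Walk the firmware region table stored just before the trailer and copy
 * the region matching @region's physical address into its mapped memory,
 * skipping shared regions that have already been consumed.
 */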
static int
mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
struct mtk_wed_wo_memory_region *region)
{
const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
const struct mtk_wed_fw_trailer *trailer;
const struct mtk_wed_fw_region *fw_region;
trailer_ptr = fw->data + fw->size - sizeof(*trailer);
trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
first_region_ptr = region_ptr;
while (region_ptr < trailer_ptr) {
u32 length;
fw_region = (const struct mtk_wed_fw_region *)region_ptr;
length = le32_to_cpu(fw_region->len);
if (region->phy_addr != le32_to_cpu(fw_region->addr))
goto next;
if (region->size < length)
goto next;
if (first_region_ptr < ptr + length)
goto next;
if (region->shared && region->consumed)
return 0;
if (!region->shared || !region->consumed) {
memcpy_toio(region->addr, ptr, length);
region->consumed = true;
return 0;
}
next:
region_ptr += sizeof(*fw_region);
ptr += length;
}
return -EINVAL;
}
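/* Look up the WO memory regions from the device tree, request the per-SoC
 * firmware image, copy each firmware region to its target memory and
 * release the WO CPU from reset.
 */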
static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
static struct mtk_wed_wo_memory_region mem_region[] = {
[MTK_WED_WO_REGION_EMI] = {
.name = "wo-emi",
},
[MTK_WED_WO_REGION_ILM] = {
.name = "wo-ilm",
},
[MTK_WED_WO_REGION_DATA] = {
.name = "wo-data",
.shared = true,
},
};
const struct mtk_wed_fw_trailer *trailer;
const struct firmware *fw;
const char *fw_name;
u32 val, boot_cr;
int ret, i;
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
if (ret)
return ret;
}
wo->boot.name = "wo-boot";
ret = mtk_wed_get_memory_region(wo, &wo->boot);
if (ret)
return ret;
/* set dummy cr */
wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
wo->hw->index + 1);
/* load firmware */
if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
fw_name = MT7981_FIRMWARE_WO;
else
fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
ret = request_firmware(&fw, fw_name, wo->hw->dev);
if (ret)
return ret;
trailer = (void *)(fw->data + fw->size -
sizeof(struct mtk_wed_fw_trailer));
dev_info(wo->hw->dev,
"MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
trailer->fw_ver, trailer->build_date);
dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
trailer->chip_id, trailer->num_region);
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
if (ret)
goto out;
}
/* set the start address */
boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
: MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
: MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
return ret;
}
static u32
mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
{
return wed_r32(wo->hw->wed_dev,
MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
}
int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
{
u32 val;
int ret;
skb_queue_head_init(&wo->mcu.res_q);
init_waitqueue_head(&wo->mcu.wait);
mutex_init(&wo->mcu.mutex);
ret = mtk_wed_mcu_load_firmware(wo);
if (ret)
return ret;
return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
100, MTK_FW_DL_TIMEOUT);
}
MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
| linux-master | drivers/net/ethernet/mediatek/mtk_wed_mcu.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Felix Fietkau <[email protected]> */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst_metadata.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
static DEFINE_SPINLOCK(ppe_lock);
static const struct rhashtable_params mtk_flow_l2_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, l2_node),
.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
.key_len = offsetof(struct mtk_foe_bridge, key_end),
.automatic_shrinking = true,
};
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
}
static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
{
return readl(ppe->base + reg);
}
static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
{
u32 val;
val = ppe_r32(ppe, reg);
val &= ~mask;
val |= set;
ppe_w32(ppe, reg, val);
return val;
}
static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
{
return ppe_m32(ppe, reg, 0, val);
}
static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
{
return ppe_m32(ppe, reg, val, 0);
}
static u32 mtk_eth_timestamp(struct mtk_eth *eth)
{
return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
}
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
u32 val;
ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
!(val & MTK_PPE_GLO_CFG_BUSY),
20, MTK_PPE_WAIT_TIMEOUT_US);
if (ret)
dev_err(ppe->dev, "PPE table busy");
return ret;
}
static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
{
int ret;
u32 val;
ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
!(val & MTK_PPE_MIB_SER_CR_ST),
20, MTK_PPE_WAIT_TIMEOUT_US);
if (ret)
dev_err(ppe->dev, "MIB table busy");
return ret;
}
static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
{
u32 val, cnt_r0, cnt_r1, cnt_r2;
int ret;
val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
ret = mtk_ppe_mib_wait_busy(ppe);
if (ret)
return ret;
cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
/* 64 bit for each counter */
u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
*bytes = ((u64)cnt_r1 << 32) | cnt_r0;
*packets = ((u64)cnt_r3 << 32) | cnt_r2;
} else {
/* 48 bit byte counter, 40 bit packet counter */
u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
*packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
}
return 0;
}
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
}
static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
{
mtk_ppe_cache_clear(ppe);
ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
enable * MTK_PPE_CACHE_CTL_EN);
}
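/* Derive the FOE table hash bucket for an entry from its IPv4/IPv6 address
 * and port tuple.
 */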
static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
hv2 = e->ipv4.orig.dest_ip;
hv3 = e->ipv4.orig.src_ip;
break;
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
hv1 ^= e->ipv6.ports;
hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
hv2 ^= e->ipv6.dest_ip[0];
hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
hv3 ^= e->ipv6.src_ip[0];
break;
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
case MTK_PPE_PKT_TYPE_IPV6_6RD:
default:
WARN_ON_ONCE(1);
return MTK_PPE_HASH_MASK;
}
hash = (hv1 & hv2) | ((~hv1) & hv3);
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
static inline struct mtk_foe_mac_info *
mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.l2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
return &entry->ipv4.l2;
}
static inline u32 *
mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return &entry->bridge.ib2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
return &entry->ipv4.ib2;
}
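/* Initialize a FOE entry in the BIND state for the given packet type: fill
 * ib1/ib2 according to the SoC generation, set the destination PSE port,
 * copy the L2 addresses and pick the matching ethertype.
 */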
int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int type, int l4proto, u8 pse_port, u8 *src_mac,
u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
if (mtk_is_netsys_v2_or_greater(eth)) {
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
entry->ib1 = val;
val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
} else {
int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
entry->ib1 = val;
val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
}
if (is_multicast_ether_addr(dest_mac))
val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
entry->ipv4.orig.ports = ports_pad;
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
ether_addr_copy(entry->bridge.src_mac, src_mac);
ether_addr_copy(entry->bridge.dest_mac, dest_mac);
entry->bridge.ib2 = val;
l2 = &entry->bridge.l2;
} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
entry->ipv4.ib2 = val;
l2 = &entry->ipv4.l2;
}
l2->dest_mac_hi = get_unaligned_be32(dest_mac);
l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
l2->src_mac_hi = get_unaligned_be32(src_mac);
l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
l2->etype = ETH_P_IPV6;
else
l2->etype = ETH_P_IP;
return 0;
}
int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
struct mtk_foe_entry *entry, u8 port)
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
u32 val = *ib2;
if (mtk_is_netsys_v2_or_greater(eth)) {
val &= ~MTK_FOE_IB2_DEST_PORT_V2;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
} else {
val &= ~MTK_FOE_IB2_DEST_PORT;
val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
}
*ib2 = val;
return 0;
}
int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
if (egress) {
t = &entry->ipv4.new;
break;
}
fallthrough;
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
t = &entry->ipv4.orig;
break;
case MTK_PPE_PKT_TYPE_IPV6_6RD:
entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
return 0;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
t->src_ip = be32_to_cpu(src_addr);
t->dest_ip = be32_to_cpu(dest_addr);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
return 0;
t->src_port = be16_to_cpu(src_port);
t->dest_port = be16_to_cpu(dest_port);
return 0;
}
int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
switch (type) {
case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
src = entry->dslite.tunnel_src_ip;
dest = entry->dslite.tunnel_dest_ip;
break;
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
case MTK_PPE_PKT_TYPE_IPV6_6RD:
entry->ipv6.src_port = be16_to_cpu(src_port);
entry->ipv6.dest_port = be16_to_cpu(dest_port);
fallthrough;
case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
src = entry->ipv6.src_ip;
dest = entry->ipv6.dest_ip;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
for (i = 0; i < 4; i++)
src[i] = be32_to_cpu(src_addr[i]);
for (i = 0; i < 4; i++)
dest[i] = be32_to_cpu(dest_addr[i]);
return 0;
}
int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int port)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int vid)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
return -ENOSPC;
}
}
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int sid)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int wdma_idx, int txq, int bss, int wcid)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
switch (eth->soc->version) {
case 3:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
break;
case 2:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
break;
default:
*ib2 &= ~MTK_FOE_IB2_PORT_MG;
*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
if (wdma_idx)
*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
break;
}
return 0;
}
int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
unsigned int queue)
{
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
if (mtk_is_netsys_v2_or_greater(eth)) {
*ib2 &= ~MTK_FOE_IB2_QID_V2;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
} else {
*ib2 &= ~MTK_FOE_IB2_QID;
*ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
*ib2 |= MTK_FOE_IB2_PSE_QOS;
}
return 0;
}
static bool
mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
struct mtk_foe_entry *data)
{
int type, len;
if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
return false;
type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
len = offsetof(struct mtk_foe_entry, ipv6._rsv);
else
len = offsetof(struct mtk_foe_entry, ipv4.ib2);
return !memcmp(&entry->data.data, &data->data, len - 4);
}
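/* Remove a software flow entry: for L2 entries also tear down all attached
 * subflows; for hardware-backed entries invalidate the FOE slot and clear
 * its accounting counters.
 */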
static void
__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
struct hlist_head *head;
struct hlist_node *tmp;
if (entry->type == MTK_FLOW_TYPE_L2) {
rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
mtk_flow_l2_ht_params);
head = &entry->l2_flows;
hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
__mtk_foe_entry_clear(ppe, entry);
return;
}
hlist_del_init(&entry->list);
if (entry->hash != 0xffff) {
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
mtk_ppe_cache_clear(ppe);
if (ppe->accounting) {
struct mtk_foe_accounting *acct;
acct = ppe->acct_table + entry->hash * sizeof(*acct);
acct->packets = 0;
acct->bytes = 0;
}
}
entry->hash = 0xffff;
if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
return;
hlist_del_init(&entry->l2_data.list);
kfree(entry);
}
static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
{
u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
u16 now = mtk_eth_timestamp(ppe->eth);
u16 timestamp = ib1 & ib1_ts_mask;
if (timestamp > now)
return ib1_ts_mask + 1 - timestamp + now;
else
return now - timestamp;
}
static void
mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
struct hlist_node *tmp;
int idle;
idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
int cur_idle;
u32 ib1;
hwe = mtk_foe_get_entry(ppe, cur->hash);
ib1 = READ_ONCE(hwe->ib1);
if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
cur->hash = 0xffff;
__mtk_foe_entry_clear(ppe, cur);
continue;
}
cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
if (cur_idle >= idle)
continue;
idle = cur_idle;
entry->data.ib1 &= ~ib1_ts_mask;
entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
}
}
static void
mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
struct mtk_foe_entry foe = {};
struct mtk_foe_entry *hwe;
spin_lock_bh(&ppe_lock);
if (entry->type == MTK_FLOW_TYPE_L2) {
mtk_flow_entry_update_l2(ppe, entry);
goto out;
}
if (entry->hash == 0xffff)
goto out;
hwe = mtk_foe_get_entry(ppe, entry->hash);
memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
entry->hash = 0xffff;
goto out;
}
entry->data.ib1 = foe.ib1;
out:
spin_unlock_bh(&ppe_lock);
}
static void
__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
u16 hash)
{
struct mtk_eth *eth = ppe->eth;
u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
u32 val;
if (mtk_is_netsys_v2_or_greater(eth)) {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
timestamp);
} else {
entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
timestamp);
}
hwe = mtk_foe_get_entry(ppe, hash);
memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
wmb();
hwe->ib1 = entry->ib1;
if (ppe->accounting) {
if (mtk_is_netsys_v2_or_greater(eth))
val = MTK_FOE_IB2_MIB_CNT_V2;
else
val = MTK_FOE_IB2_MIB_CNT;
*mtk_foe_entry_ib2(eth, hwe) |= val;
}
dma_wmb();
mtk_ppe_cache_clear(ppe);
}
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
spin_lock_bh(&ppe_lock);
__mtk_foe_entry_clear(ppe, entry);
spin_unlock_bh(&ppe_lock);
}
static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
struct mtk_flow_entry *prev;
entry->type = MTK_FLOW_TYPE_L2;
prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
mtk_flow_l2_ht_params);
if (likely(!prev))
return 0;
if (IS_ERR(prev))
return PTR_ERR(prev);
return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
&entry->l2_node, mtk_flow_l2_ht_params);
}
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
const struct mtk_soc_data *soc = ppe->eth->soc;
int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
u32 hash;
if (type == MTK_PPE_PKT_TYPE_BRIDGE)
return mtk_foe_entry_commit_l2(ppe, entry);
hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
entry->hash = 0xffff;
spin_lock_bh(&ppe_lock);
hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
spin_unlock_bh(&ppe_lock);
return 0;
}
static void
mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u16 hash)
{
const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
struct mtk_foe_entry foe = {}, *hwe;
struct mtk_foe_mac_info *l2;
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;
flow_info->l2_data.base_flow = entry;
flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
flow_info->hash = hash;
hlist_add_head(&flow_info->list,
&ppe->foe_flow[hash / soc->hash_offset]);
hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
hwe = mtk_foe_get_entry(ppe, hash);
memcpy(&foe, hwe, soc->foe_entry_size);
foe.ib1 &= ib1_mask;
foe.ib1 |= entry->data.ib1 & ~ib1_mask;
l2 = mtk_foe_entry_l2(ppe->eth, &foe);
memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
l2->etype = ETH_P_IPV6;
*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
__mtk_foe_entry_commit(ppe, &foe, hash);
}
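/* Handle a packet trapped to the CPU with an unbound FOE hash: bind a
 * matching software flow entry to that hash, or create an L2 subflow from
 * the bridge table when only a MAC/VLAN match exists.
 */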
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
const struct mtk_soc_data *soc = ppe->eth->soc;
struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
struct hlist_node *n;
struct ethhdr *eh;
bool found = false;
u8 *tag;
spin_lock_bh(&ppe_lock);
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
goto out;
hlist_for_each_entry_safe(entry, n, head, list) {
if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
MTK_FOE_STATE_BIND))
continue;
entry->hash = 0xffff;
__mtk_foe_entry_clear(ppe, entry);
continue;
}
if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
if (entry->hash != 0xffff)
entry->hash = 0xffff;
continue;
}
entry->hash = hash;
__mtk_foe_entry_commit(ppe, &entry->data, hash);
found = true;
}
if (found)
goto out;
eh = eth_hdr(skb);
ether_addr_copy(key.dest_mac, eh->h_dest);
ether_addr_copy(key.src_mac, eh->h_source);
tag = skb->data - 2;
key.vlan = 0;
switch (skb->protocol) {
#if IS_ENABLED(CONFIG_NET_DSA)
case htons(ETH_P_XDSA):
if (!netdev_uses_dsa(skb->dev) ||
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;
if (!skb_metadata_dst(skb))
tag += 4;
if (get_unaligned_be16(tag) != ETH_P_8021Q)
break;
fallthrough;
#endif
case htons(ETH_P_8021Q):
key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
break;
default:
break;
}
entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
if (!entry)
goto out;
mtk_foe_entry_commit_subflow(ppe, entry, hash);
out:
spin_unlock_bh(&ppe_lock);
}
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
mtk_flow_entry_update(ppe, entry);
return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
if (!ppe)
return -EINVAL;
/* disable KA */
ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
usleep_range(10000, 11000);
/* set KA timer to maximum */
ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
/* set KA tick select */
ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
usleep_range(10000, 11000);
/* disable scan mode */
ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
usleep_range(10000, 11000);
return mtk_ppe_wait_busy(ppe);
}
struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
struct mtk_foe_accounting *diff)
{
struct mtk_foe_accounting *acct;
int size = sizeof(struct mtk_foe_accounting);
u64 bytes, packets;
if (!ppe->accounting)
return NULL;
if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
return NULL;
acct = ppe->acct_table + index * size;
acct->bytes += bytes;
acct->packets += packets;
if (diff) {
diff->bytes = bytes;
diff->packets = packets;
}
return acct;
}
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
{
bool accounting = eth->soc->has_accounting;
const struct mtk_soc_data *soc = eth->soc;
struct mtk_foe_accounting *acct;
struct device *dev = eth->dev;
struct mtk_mib_entry *mib;
struct mtk_ppe *ppe;
u32 foe_flow_size;
void *foe;
ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
if (!ppe)
return NULL;
rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
/* need to allocate a separate device, since the PPE DMA access is
* not coherent.
*/
ppe->base = base;
ppe->eth = eth;
ppe->dev = dev;
ppe->version = eth->soc->offload_version;
ppe->accounting = accounting;
foe = dmam_alloc_coherent(ppe->dev,
MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
goto err_free_l2_flows;
ppe->foe_table = foe;
foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
sizeof(*ppe->foe_flow);
ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
if (!ppe->foe_flow)
goto err_free_l2_flows;
if (accounting) {
mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
&ppe->mib_phys, GFP_KERNEL);
if (!mib)
return NULL;
ppe->mib_table = mib;
acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
GFP_KERNEL);
if (!acct)
return NULL;
ppe->acct_table = acct;
}
mtk_ppe_debugfs_init(ppe, index);
return ppe;
err_free_l2_flows:
rhashtable_destroy(&ppe->l2_flows);
return NULL;
}
void mtk_ppe_deinit(struct mtk_eth *eth)
{
int i;
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
if (!eth->ppe[i])
return;
rhashtable_destroy(ð->ppe[i]->l2_flows);
}
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
{
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
memset(ppe->foe_table, 0,
MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
/* skip all entries that cross the 1024 byte boundary */
for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
for (k = 0; k < ARRAY_SIZE(skip); k++) {
struct mtk_foe_entry *hwe;
hwe = mtk_foe_get_entry(ppe, i + skip[k]);
hwe->ib1 |= MTK_FOE_IB1_STATIC;
}
}
}
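/* Program the FOE table base and the global PPE configuration (aging,
 * hashing, keep-alive, bind rates and limits), enable the cache and, when
 * supported, the MIB counters, then switch the offload engine on.
 */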
void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
if (!ppe)
return;
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
MTK_PPE_TB_CFG_AGE_TCP_FIN |
FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
MTK_PPE_KEEPALIVE_DISABLE) |
FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_INFO_SEL;
if (!mtk_is_netsys_v3_or_greater(ppe->eth))
val |= MTK_PPE_TB_CFG_ENTRY_80B;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
mtk_ppe_cache_enable(ppe, true);
val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
if (mtk_is_netsys_v2_or_greater(ppe->eth))
val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
MTK_PPE_MD_TOAP_BYP_CRSN1 |
MTK_PPE_MD_TOAP_BYP_CRSN2 |
MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
else
val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
val = MTK_PPE_BIND_LIMIT1_FULL |
FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
/* enable PPE */
val = MTK_PPE_GLO_CFG_EN |
MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
MTK_PPE_GLO_CFG_IP4_CS_DROP |
MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
}
if (ppe->accounting && ppe->mib_phys) {
ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
MTK_PPE_MIB_CFG_EN);
ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
MTK_PPE_MIB_CFG_RD_CLR);
ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
MTK_PPE_MIB_CFG_RD_CLR);
}
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
{
u32 val;
int i;
if (!ppe)
return 0;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
MTK_FOE_STATE_INVALID);
}
mtk_ppe_cache_enable(ppe, false);
/* disable offload engine */
ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
/* disable aging */
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
MTK_PPE_TB_CFG_AGE_TCP_FIN;
ppe_clear(ppe, MTK_PPE_TB_CFG, val);
return mtk_ppe_wait_busy(ppe);
}
|
linux-master
|
drivers/net/ethernet/mediatek/mtk_ppe.c
|
/*************************************************************************
* myri10ge.c: Myricom Myri-10G Ethernet driver.
*
* Copyright (C) 2005 - 2011 Myricom, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Myricom, Inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* If the eeprom on your board is not recent enough, you will need to get a
* newer firmware image at:
* http://www.myri.com/scs/download-Myri10GE.html
*
* Contact Information:
* <[email protected]>
* Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
*************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.3-1.534"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: [email protected]");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");
#define MYRI10GE_MAX_ETHER_MTU 9014
#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4
#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
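/* Worked example, assuming MYRI10GE_ALLOC_ORDER == 0 on a system with
 * 4 KiB pages: MYRI10GE_ALLOC_SIZE is then 4096 bytes, so a maximal
 * 9014-byte frame needs at most 9014 / 4096 + 1 = 3 receive fragments.
 * Larger allocation orders shrink this bound accordingly.
 */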
#define MYRI10GE_MAX_SLICES 32
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
DEFINE_DMA_UNMAP_ADDR(bus);
DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
DEFINE_DMA_UNMAP_ADDR(bus);
DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
u32 data0;
u32 data1;
u32 data2;
};
struct myri10ge_rx_buf {
struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
struct myri10ge_rx_buffer_state *info;
struct page *page;
dma_addr_t bus;
int page_offset;
int cnt;
int fill_cnt;
int alloc_fail;
int mask; /* number of rx slots -1 */
int watchdog_needed;
};
struct myri10ge_tx_buf {
struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
__be32 __iomem *send_go; /* "go" doorbell ptr */
__be32 __iomem *send_stop; /* "stop" doorbell ptr */
struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
char *req_bytes;
struct myri10ge_tx_buffer_state *info;
int mask; /* number of transmit slots -1 */
int req ____cacheline_aligned; /* transmit slots submitted */
int pkt_start; /* packets started */
int stop_queue;
int linearized;
int done ____cacheline_aligned; /* transmit slots completed */
int pkt_done; /* packets completed */
int wake_queue;
int queue_active;
};
struct myri10ge_rx_done {
struct mcp_slot *entry;
dma_addr_t bus;
int cnt;
int idx;
};
struct myri10ge_slice_netstats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_dropped;
unsigned long tx_dropped;
};
struct myri10ge_slice_state {
struct myri10ge_tx_buf tx; /* transmit ring */
struct myri10ge_rx_buf rx_small;
struct myri10ge_rx_buf rx_big;
struct myri10ge_rx_done rx_done;
struct net_device *dev;
struct napi_struct napi;
struct myri10ge_priv *mgp;
struct myri10ge_slice_netstats stats;
__be32 __iomem *irq_claim;
struct mcp_irq_data *fw_stats;
dma_addr_t fw_stats_bus;
int watchdog_tx_done;
int watchdog_tx_req;
int watchdog_rx_done;
int stuck;
#ifdef CONFIG_MYRI10GE_DCA
int cached_dca_tag;
int cpu;
__be32 __iomem *dca_tag;
#endif
char irq_desc[32];
};
struct myri10ge_priv {
struct myri10ge_slice_state *ss;
int tx_boundary; /* boundary transmits cannot cross */
int num_slices;
int running; /* running? */
int small_bytes;
int big_bytes;
int max_intr_slots;
struct net_device *dev;
u8 __iomem *sram;
int sram_size;
unsigned long board_span;
unsigned long iomem_base;
__be32 __iomem *irq_deassert;
char *mac_addr_string;
struct mcp_cmd_response *cmd;
dma_addr_t cmd_bus;
struct pci_dev *pdev;
int msi_enabled;
int msix_enabled;
struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
int dca_enabled;
int relaxed_order;
#endif
u32 link_state;
unsigned int rdma_tags_available;
int intr_coal_delay;
__be32 __iomem *intr_coal_delay_ptr;
int wc_cookie;
int down_cnt;
wait_queue_head_t down_wq;
struct work_struct watchdog_work;
struct timer_list watchdog_timer;
int watchdog_resets;
int watchdog_pause;
int pause;
bool fw_name_allocated;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char *product_code_string;
char fw_version[128];
int fw_ver_major;
int fw_ver_minor;
int fw_ver_tiny;
int adopted_rx_filter_bug;
u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
u32 read_write_dma;
u32 link_changes;
u32 msg_enable;
unsigned int board_number;
int rebooted;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
/* Careful: must be accessed under kernel_param_lock() */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, 0644);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
{[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, 0444);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
static int myri10ge_small_bytes = -1; /* -1 == auto */
module_param(myri10ge_small_bytes, int, 0644);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
static int myri10ge_msi = 1; /* enable msi by default */
module_param(myri10ge_msi, int, 0644);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, 0444);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, 0444);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, 0644);
MODULE_PARM_DESC(myri10ge_deassert_wait,
"Wait when deasserting legacy interrupts");
static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, 0444);
MODULE_PARM_DESC(myri10ge_force_firmware,
"Force firmware to assume aligned completions");
static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, 0444);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, 0444);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, 0444);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, 0444);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
"Set stuck legacy IRQ detection threshold");
#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
static int myri10ge_debug = -1; /* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, 0644);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, 0444);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, 0444);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, 0444);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
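/* Illustrative use of the helpers above, with hypothetical values: a
 * 64-bit dma_addr_t of 0x0000000123456000 splits into
 * MYRI10GE_HIGHPART_TO_U32() == 0x00000001 and
 * MYRI10GE_LOWPART_TO_U32() == 0x23456000, while on a 32-bit
 * dma_addr_t the high part evaluates to 0.  myri10ge_pio_copy() wraps
 * __iowrite64_copy(), whose count argument is in 64-bit words, hence
 * the division of the byte size by 8.
 */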
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev);
static inline void put_be32(__be32 val, __be32 __iomem * p)
{
__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
static void myri10ge_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
if (mgp->fw_name_allocated)
kfree(mgp->fw_name);
mgp->fw_name = name;
mgp->fw_name_allocated = allocated;
}
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
{
struct mcp_cmd *buf;
char buf_bytes[sizeof(*buf) + 8];
struct mcp_cmd_response *response = mgp->cmd;
char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
u32 dma_low, dma_high, result, value;
int sleep_total = 0;
/* ensure buf is aligned to 8 bytes */
buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
buf->data0 = htonl(data->data0);
buf->data1 = htonl(data->data1);
buf->data2 = htonl(data->data2);
buf->cmd = htonl(cmd);
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf->response_addr.low = htonl(dma_low);
buf->response_addr.high = htonl(dma_high);
response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
mb();
myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
/* wait up to 15ms. Longest command is the DMA benchmark,
* which is capped at 5ms, but runs from a timeout handler
* that runs every 7.8ms. So a 15ms timeout leaves us with
* a 2.2ms margin
*/
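/* Spelling out that arithmetic: the benchmark may start as late as
 * 7.8ms into the wait and then run for up to 5ms, so the worst case
 * is roughly 7.8 + 5 = 12.8ms, leaving 15 - 12.8 = 2.2ms of slack.
 */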
if (atomic) {
/* if atomic is set, do not sleep,
* and try to get the completion quickly
* (1ms will be enough for those commands) */
for (sleep_total = 0;
sleep_total < 1000 &&
response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
sleep_total += 10) {
udelay(10);
mb();
}
} else {
/* use msleep for most commands */
for (sleep_total = 0;
sleep_total < 15 &&
response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
sleep_total++)
msleep(1);
}
result = ntohl(response->result);
value = ntohl(response->data);
if (result != MYRI10GE_NO_RESPONSE_RESULT) {
if (result == 0) {
data->data0 = value;
return 0;
} else if (result == MXGEFW_CMD_UNKNOWN) {
return -ENOSYS;
} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
return -E2BIG;
} else if (result == MXGEFW_CMD_ERROR_RANGE &&
cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
(data->
data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
0) {
return -ERANGE;
} else {
dev_err(&mgp->pdev->dev,
"command %d failed, result = %d\n",
cmd, result);
return -ENXIO;
}
}
dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
cmd, result);
return -EAGAIN;
}
/*
* The eeprom strings on the lanaiX have the format
* SN=x\0
* MAC=x:x:x:x:x:x\0
* PT:ddd mmm xx xx:xx:xx xx\0
* PV:ddd mmm xx xx:xx:xx xx\0
*/
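/* A hypothetical eeprom_strings buffer is therefore a run of
 * NUL-terminated records, e.g. (values made up for illustration):
 *
 *   "SN=123456\0MAC=00:60:dd:00:00:01\0PC=XXX-XXXX-XXX\0\0"
 *
 * myri10ge_read_mac_addr() below walks the buffer one record at a
 * time and picks out the MAC=, PC= and SN= entries it cares about.
 */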
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
char *ptr, *limit;
int i;
ptr = mgp->eeprom_strings;
limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
while (*ptr != '\0' && ptr < limit) {
if (memcmp(ptr, "MAC=", 4) == 0) {
ptr += 4;
mgp->mac_addr_string = ptr;
for (i = 0; i < 6; i++) {
if ((ptr + 2) > limit)
goto abort;
mgp->mac_addr[i] =
simple_strtoul(ptr, &ptr, 16);
ptr += 1;
}
}
if (memcmp(ptr, "PC=", 3) == 0) {
ptr += 3;
mgp->product_code_string = ptr;
}
if (memcmp((const void *)ptr, "SN=", 3) == 0) {
ptr += 3;
mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
}
while (ptr < limit && *ptr++) ;
}
return 0;
abort:
dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
return -ENXIO;
}
/*
* Enable or disable periodic RDMAs from the host to make certain
* chipsets resend dropped PCIe messages
*/
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
char __iomem *submit;
__be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high;
int i;
/* clear confirmation addr */
mgp->cmd->data = 0;
mb();
/* send a rdma command to the PCIe engine, and wait for the
* response in the confirmation address. The firmware should
* write a -1 there to indicate it is alive and well
*/
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf[0] = htonl(dma_high); /* confirm addr MSW */
buf[1] = htonl(dma_low); /* confirm addr LSW */
buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
buf[3] = htonl(dma_high); /* dummy addr MSW */
buf[4] = htonl(dma_low); /* dummy addr LSW */
buf[5] = htonl(enable); /* enable? */
submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
msleep(1);
if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
(enable ? "enable" : "disable"));
}
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
struct mcp_gen_header *hdr)
{
struct device *dev = &mgp->pdev->dev;
/* check firmware type */
if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
return -EINVAL;
}
/* save firmware version for ethtool */
strscpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
&mgp->fw_ver_minor, &mgp->fw_ver_tiny);
if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
MXGEFW_VERSION_MINOR);
return -EINVAL;
}
return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
unsigned crc, reread_crc;
const struct firmware *fw;
struct device *dev = &mgp->pdev->dev;
unsigned char *fw_readback;
struct mcp_gen_header *hdr;
size_t hdr_offset;
int status;
unsigned i;
if (request_firmware(&fw, mgp->fw_name, dev) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
goto abort_with_nothing;
}
/* check size */
if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
fw->size < MCP_HEADER_PTR_OFFSET + 4) {
dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
status = -EINVAL;
goto abort_with_fw;
}
/* check id */
hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
dev_err(dev, "Bad firmware file\n");
status = -EINVAL;
goto abort_with_fw;
}
hdr = (void *)(fw->data + hdr_offset);
status = myri10ge_validate_firmware(mgp, hdr);
if (status != 0)
goto abort_with_fw;
crc = crc32(~0, fw->data, fw->size);
for (i = 0; i < fw->size; i += 256) {
myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
fw->data + i,
min(256U, (unsigned)(fw->size - i)));
mb();
readb(mgp->sram);
}
fw_readback = vmalloc(fw->size);
if (!fw_readback) {
status = -ENOMEM;
goto abort_with_fw;
}
/* corruption checking is good for parity recovery and buggy chipsets */
memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
reread_crc = crc32(~0, fw_readback, fw->size);
vfree(fw_readback);
if (crc != reread_crc) {
dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
(unsigned)fw->size, reread_crc, crc);
status = -EIO;
goto abort_with_fw;
}
*size = (u32) fw->size;
abort_with_fw:
release_firmware(fw);
abort_with_nothing:
return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
struct mcp_gen_header *hdr;
struct device *dev = &mgp->pdev->dev;
const size_t bytes = sizeof(struct mcp_gen_header);
size_t hdr_offset;
int status;
/* find running firmware header */
hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
dev_err(dev, "Running firmware has bad header offset (%d)\n",
(int)hdr_offset);
return -EIO;
}
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = kmalloc(bytes, GFP_KERNEL);
if (hdr == NULL)
return -ENOMEM;
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
/* check to see if the adopted firmware has a bug where adopting
* it will cause broadcasts to be filtered unless the NIC
* is kept in ALLMULTI mode */
if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
mgp->adopted_rx_filter_bug = 1;
dev_warn(dev, "Adopting fw %d.%d.%d: "
"working around rx filter bug\n",
mgp->fw_ver_major, mgp->fw_ver_minor,
mgp->fw_ver_tiny);
}
return status;
}
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
int status;
/* probe for IPv6 TSO support */
mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
&cmd, 0);
if (status == 0) {
mgp->max_tso6 = cmd.data0;
mgp->features |= NETIF_F_TSO6;
}
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
return -ENXIO;
}
mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
char __iomem *submit;
__be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high, size;
int status, i;
size = 0;
status = myri10ge_load_hotplug_firmware(mgp, &size);
if (status) {
if (!adopt)
return status;
dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
/* Do not attempt to adopt firmware if there
* was a bad crc */
if (status == -EIO)
return status;
status = myri10ge_adopt_running_firmware(mgp);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to adopt running firmware\n");
return status;
}
dev_info(&mgp->pdev->dev,
"Successfully adopted running firmware\n");
if (mgp->tx_boundary == 4096) {
dev_warn(&mgp->pdev->dev,
"Using firmware currently running on NIC"
". For optimal\n");
dev_warn(&mgp->pdev->dev,
"performance consider loading optimized "
"firmware\n");
dev_warn(&mgp->pdev->dev, "via hotplug\n");
}
set_fw_name(mgp, "adopted", false);
mgp->tx_boundary = 2048;
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
return status;
}
/* clear confirmation addr */
mgp->cmd->data = 0;
mb();
/* send a reload command to the bootstrap MCP, and wait for the
* response in the confirmation address. The firmware should
* write a -1 there to indicate it is alive and well
*/
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf[0] = htonl(dma_high); /* confirm addr MSW */
buf[1] = htonl(dma_low); /* confirm addr LSW */
buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
/* FIX: All newest firmware should un-protect the bottom of
* the sram before handoff. However, the very first interfaces
* do not. Therefore the handoff copy must skip the first 8 bytes
*/
buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
buf[4] = htonl(size - 8); /* length of code */
buf[5] = htonl(8); /* where to copy to */
buf[6] = htonl(0); /* where to jump to */
submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
mb();
msleep(1);
mb();
i = 0;
while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
msleep(1 << i);
i++;
}
if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
dev_err(&mgp->pdev->dev, "handoff failed\n");
return -ENXIO;
}
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
const u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
| (addr[2] << 8) | addr[3]);
cmd.data1 = ((addr[4] << 8) | (addr[5]));
status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
return status;
}
static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
struct myri10ge_cmd cmd;
int status, ctl;
ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
netdev_err(mgp->dev, "Failed to set flow control mode\n");
return status;
}
mgp->pause = pause;
return 0;
}
static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
struct myri10ge_cmd cmd;
int status, ctl;
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
struct myri10ge_cmd cmd;
int status;
u32 len;
struct page *dmatest_page;
dma_addr_t dmatest_bus;
char *test = " ";
dmatest_page = alloc_page(GFP_KERNEL);
if (!dmatest_page)
return -ENOMEM;
dmatest_bus = dma_map_page(&mgp->pdev->dev, dmatest_page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(&mgp->pdev->dev, dmatest_bus))) {
__free_page(dmatest_page);
return -ENOMEM;
}
/* Run a small DMA test.
* The magic multipliers to the length tell the firmware
* to do DMA read, write, or read+write tests. The
* results are returned in cmd.data0. The upper 16 bits of the return
* value are the number of transfers completed. The lower 16 bits are
* the time in 0.5us ticks that the transfers took to complete.
*/
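/* The read_dma/write_dma numbers computed below therefore come out in
 * MB/s.  Hypothetical example: with len == 4096 and cmd.data0 equal to
 * (1000 << 16) | 2000, the card moved 1000 * 4096 bytes in
 * 2000 * 0.5us = 1ms, and ((cmd.data0 >> 16) * len * 2) /
 * (cmd.data0 & 0xffff) = (1000 * 4096 * 2) / 2000 = 4096 MB/s; the
 * factor of 2 converts bytes-per-half-microsecond-tick into bytes per
 * microsecond, i.e. MB/s.
 */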
len = mgp->tx_boundary;
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10000;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "read";
goto abort;
}
mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x1;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "write";
goto abort;
}
mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10001;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "read/write";
goto abort;
}
mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
(cmd.data0 & 0xffff);
abort:
dma_unmap_page(&mgp->pdev->dev, dmatest_bus, PAGE_SIZE,
DMA_BIDIRECTIONAL);
put_page(dmatest_page);
if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
test, status);
return status;
}
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int i, status;
size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
unsigned long dca_tag_off;
#endif
/* try to send a reset command to the card to see if it
* is alive */
memset(&cmd, 0, sizeof(cmd));
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed reset\n");
return -ENXIO;
}
(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
/*
* Use the non-ndis mcp_slot (i.e. 4 bytes total, no Toeplitz hash
* value returned).  Older firmware will not understand this command,
* but will use the correctly sized mcp_slot, so we ignore error
* returns
*/
cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
/* Now exchange information about interrupts */
bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
cmd.data0 = (u32) bytes;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
/*
* Even though we already know how many slices are supported
* via myri10ge_probe_slices(), MXGEFW_CMD_GET_MAX_RSS_QUEUES
* has magic side effects, and must be called after a reset.
* It must be called prior to calling any RSS related cmds,
* including assigning an interrupt queue for anything but
* slice 0. It must also be called *after*
* MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
* the firmware to compute offsets.
*/
if (mgp->num_slices > 1) {
/* ask the maximum number of slices it supports */
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to get number of slices\n");
}
/*
* MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
* to setting up the interrupt queue DMA
*/
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
/* Firmware older than 1.4.32 only supports multiple
* RX queues, so if we get an error, first retry using a
* single TX queue before giving up */
if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
netif_set_real_num_tx_queues(mgp->dev, 1);
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
status = myri10ge_send_cmd(mgp,
MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
}
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to set number of slices\n");
return status;
}
}
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
cmd.data2 = i;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
&cmd, 0);
}
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
ss->irq_claim =
(__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
}
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
&cmd, 0);
mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
status |= myri10ge_send_cmd
(mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
return status;
}
put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
#ifdef CONFIG_MYRI10GE_DCA
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
dca_tag_off = cmd.data0;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
if (status == 0) {
ss->dca_tag = (__iomem __be32 *)
(mgp->sram + dca_tag_off + 4 * i);
} else {
ss->dca_tag = NULL;
}
}
#endif /* CONFIG_MYRI10GE_DCA */
/* reset mcp/driver shared state back to 0 */
mgp->link_changes = 0;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
memset(ss->rx_done.entry, 0, bytes);
ss->tx.req = 0;
ss->tx.done = 0;
ss->tx.pkt_start = 0;
ss->tx.pkt_done = 0;
ss->rx_big.cnt = 0;
ss->rx_small.cnt = 0;
ss->rx_done.idx = 0;
ss->rx_done.cnt = 0;
ss->tx.wake_queue = 0;
ss->tx.stop_queue = 0;
}
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_pause(mgp, mgp->pause);
myri10ge_set_multicast_list(mgp->dev);
return status;
}
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
int ret;
u16 ctl;
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &ctl);
ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
if (ret != on) {
ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
ctl |= (on << 4);
pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, ctl);
}
return ret;
}
static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
ss->cached_dca_tag = tag;
put_be32(htonl(tag), ss->dca_tag);
}
static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
int cpu = get_cpu();
int tag;
if (cpu != ss->cpu) {
tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
if (ss->cached_dca_tag != tag)
myri10ge_write_dca(ss, cpu, tag);
ss->cpu = cpu;
}
put_cpu();
}
static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
int err, i;
struct pci_dev *pdev = mgp->pdev;
if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
return;
if (!myri10ge_dca) {
dev_err(&pdev->dev, "dca disabled by administrator\n");
return;
}
err = dca_add_requester(&pdev->dev);
if (err) {
if (err != -ENODEV)
dev_err(&pdev->dev,
"dca_add_requester() failed, err=%d\n", err);
return;
}
mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
mgp->dca_enabled = 1;
for (i = 0; i < mgp->num_slices; i++) {
mgp->ss[i].cpu = -1;
mgp->ss[i].cached_dca_tag = -1;
myri10ge_update_dca(&mgp->ss[i]);
}
}
static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
if (!mgp->dca_enabled)
return;
mgp->dca_enabled = 0;
if (mgp->relaxed_order)
myri10ge_toggle_relaxed(pdev, 1);
dca_remove_requester(&pdev->dev);
}
static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
struct myri10ge_priv *mgp;
unsigned long event;
mgp = dev_get_drvdata(dev);
event = *(unsigned long *)data;
if (event == DCA_PROVIDER_ADD)
myri10ge_setup_dca(mgp);
else if (event == DCA_PROVIDER_REMOVE)
myri10ge_teardown_dca(mgp);
return 0;
}
#endif /* CONFIG_MYRI10GE_DCA */
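/* myri10ge_submit_8rx() pushes a block of 8 receive descriptors to the
 * NIC.  The ordering here suggests that the low address word of the
 * first descriptor is what the firmware keys on: it is temporarily
 * replaced with an all-ones pattern (DMA_BIT_MASK(32)), both
 * 4-descriptor halves are copied with write barriers in between, and
 * only then is the real addr_low written, so the firmware never sees a
 * partially written block as valid.
 */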
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
struct mcp_kreq_ether_recv *src)
{
__be32 low;
low = src->addr_low;
src->addr_low = htonl(DMA_BIT_MASK(32));
myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
mb();
myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
mb();
src->addr_low = low;
put_be32(low, &dst->addr_low);
mb();
}
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
{
struct page *page;
dma_addr_t bus;
int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
int end_offset;
#endif
if (unlikely(rx->watchdog_needed && !watchdog))
return;
/* try to refill entire ring */
while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
idx = rx->fill_cnt & rx->mask;
if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
/* we can use part of previous page */
get_page(rx->page);
} else {
/* we need a new page */
page =
alloc_pages(GFP_ATOMIC | __GFP_COMP,
MYRI10GE_ALLOC_ORDER);
if (unlikely(page == NULL)) {
if (rx->fill_cnt - rx->cnt < 16)
rx->watchdog_needed = 1;
return;
}
bus = dma_map_page(&mgp->pdev->dev, page, 0,
MYRI10GE_ALLOC_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) {
__free_pages(page, MYRI10GE_ALLOC_ORDER);
if (rx->fill_cnt - rx->cnt < 16)
rx->watchdog_needed = 1;
return;
}
rx->page = page;
rx->page_offset = 0;
rx->bus = bus;
}
rx->info[idx].page = rx->page;
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
/* start next packet on a cacheline boundary */
rx->page_offset += SKB_DATA_ALIGN(bytes);
#if MYRI10GE_ALLOC_SIZE > 4096
/* don't cross a 4KB boundary */
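/* The XOR test below flags any buffer whose start and end fall in
 * different 4 KiB blocks.  Example: page_offset 3900 with 300 bytes
 * gives end_offset 4199; 3900 ^ 4199 = 0x1f5b > 4095 because bit 12
 * differs, so the offset is rounded up to 4096 before being used.
 */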
end_offset = rx->page_offset + bytes - 1;
if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
rx->page_offset = end_offset & ~4095;
#endif
rx->fill_cnt++;
/* copy 8 descriptors to the firmware at a time */
if ((idx & 7) == 7) {
myri10ge_submit_8rx(&rx->lanai[idx - 7],
&rx->shadow[idx - 7]);
}
}
}
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
struct myri10ge_rx_buffer_state *info, int bytes)
{
/* unmap the recvd page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
dma_unmap_page(&pdev->dev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, DMA_FROM_DEVICE);
}
}
/*
* GRO does not support acceleration of tagged vlan frames, and
* this NIC does not support vlan tag offload, so we must pop
* the tag ourselves to be able to achieve GRO performance that
* is comparable to LRO.
*/
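/* Besides moving the tag into the skb's hwaccel field, the helper
 * below keeps the rest of the packet metadata consistent: with
 * CHECKSUM_COMPLETE a VLAN_HLEN-sized slice of the header is
 * subtracted back out of skb->csum, the memmove() shifts the two MAC
 * addresses forward over the removed tag, and the first fragment's
 * offset and size are adjusted to match the 4 bytes that went away.
 */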
static inline void
myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
u8 *va;
struct vlan_ethhdr *veh;
skb_frag_t *frag;
__wsum vsum;
va = addr;
va += MXGEFW_PAD;
veh = (struct vlan_ethhdr *)va;
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
NETIF_F_HW_VLAN_CTAG_RX &&
veh->h_vlan_proto == htons(ETH_P_8021Q)) {
/* fixup csum if needed */
if (skb->ip_summed == CHECKSUM_COMPLETE) {
vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
skb->csum = csum_sub(skb->csum, vsum);
}
/* pop tag */
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(veh->h_vlan_TCI));
memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN;
frag = skb_shinfo(skb)->frags;
skb_frag_off_add(frag, VLAN_HLEN);
skb_frag_size_sub(frag, VLAN_HLEN);
}
}
#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
skb_frag_t *rx_frags;
struct myri10ge_rx_buf *rx;
int i, idx, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
bytes = mgp->small_bytes;
} else {
rx = &ss->rx_big;
bytes = mgp->big_bytes;
}
len += MXGEFW_PAD;
idx = rx->cnt & rx->mask;
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
put_page(rx->info[idx].page);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
}
return 0;
}
rx_frags = skb_shinfo(skb)->frags;
/* Fill skb_frag_t(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
skb_fill_page_desc(skb, i, rx->info[idx].page,
rx->info[idx].page_offset,
remainder < MYRI10GE_ALLOC_SIZE ?
remainder : MYRI10GE_ALLOC_SIZE);
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
}
/* remove padding */
skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
skb->len = len;
skb->data_len = len;
skb->truesize += len;
if (dev->features & NETIF_F_RXCSUM) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum;
}
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
napi_gro_frags(&ss->napi);
return 1;
}
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
struct pci_dev *pdev = ss->mgp->pdev;
struct myri10ge_tx_buf *tx = &ss->tx;
struct netdev_queue *dev_queue;
struct sk_buff *skb;
int idx, len;
while (tx->pkt_done != mcp_index) {
idx = tx->done & tx->mask;
skb = tx->info[idx].skb;
/* Mark as free */
tx->info[idx].skb = NULL;
if (tx->info[idx].last) {
tx->pkt_done++;
tx->info[idx].last = 0;
}
tx->done++;
len = dma_unmap_len(&tx->info[idx], len);
dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_consume_skb_irq(skb);
if (len)
dma_unmap_single(&pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
} else {
if (len)
dma_unmap_page(&pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
}
}
dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
/*
* Make a minimal effort to prevent the NIC from polling an
* idle tx queue. If we can't get the lock we leave the queue
* active. In this case, either a thread was about to start
* using the queue anyway, or we lost a race and the NIC will
* waste some of its resources polling an inactive queue for a
* while.
*/
if ((ss->mgp->dev->real_num_tx_queues > 1) &&
__netif_tx_trylock(dev_queue)) {
if (tx->req == tx->done) {
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
mb();
}
__netif_tx_unlock(dev_queue);
}
/* start the queue if we've stopped it */
if (netif_tx_queue_stopped(dev_queue) &&
tx->req - tx->done < (tx->mask >> 1) &&
ss->mgp->running == MYRI10GE_ETH_RUNNING) {
tx->wake_queue++;
netif_tx_wake_queue(dev_queue);
}
}
static inline int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
struct myri10ge_rx_done *rx_done = &ss->rx_done;
struct myri10ge_priv *mgp = ss->mgp;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
int idx = rx_done->idx;
int cnt = rx_done->cnt;
int work_done = 0;
u16 length;
__wsum checksum;
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
rx_done->entry[idx].length = 0;
checksum = csum_unfold(rx_done->entry[idx].checksum);
rx_ok = myri10ge_rx_done(ss, length, checksum);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
cnt++;
idx = cnt & (mgp->max_intr_slots - 1);
work_done++;
}
rx_done->idx = idx;
rx_done->cnt = cnt;
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;
/* restock receive rings if needed */
if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
if (unlikely(stats->stats_updated)) {
unsigned link_up = ntohl(stats->link_up);
if (mgp->link_state != link_up) {
mgp->link_state = link_up;
if (mgp->link_state == MXGEFW_LINK_UP) {
netif_info(mgp, link, mgp->dev, "link up\n");
netif_carrier_on(mgp->dev);
mgp->link_changes++;
} else {
netif_info(mgp, link, mgp->dev, "link %s\n",
(link_up == MXGEFW_LINK_MYRINET ?
"mismatch (Myrinet detected)" :
"down"));
netif_carrier_off(mgp->dev);
mgp->link_changes++;
}
}
if (mgp->rdma_tags_available !=
ntohl(stats->rdma_tags_available)) {
mgp->rdma_tags_available =
ntohl(stats->rdma_tags_available);
netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
mgp->rdma_tags_available);
}
mgp->down_cnt += stats->link_down;
if (stats->link_down)
wake_up(&mgp->down_wq);
}
}
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
struct myri10ge_slice_state *ss =
container_of(napi, struct myri10ge_slice_state, napi);
int work_done;
#ifdef CONFIG_MYRI10GE_DCA
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
put_be32(htonl(3), ss->irq_claim);
}
return work_done;
}
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
struct myri10ge_priv *mgp = ss->mgp;
struct mcp_irq_data *stats = ss->fw_stats;
struct myri10ge_tx_buf *tx = &ss->tx;
u32 send_done_count;
int i;
/* an interrupt on a non-zero receive-only slice is implicitly
* valid since MSI-X irqs are not shared */
if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
napi_schedule(&ss->napi);
return IRQ_HANDLED;
}
/* make sure it is our IRQ, and that the DMA has finished */
if (unlikely(!stats->valid))
return IRQ_NONE;
/* low bit indicates receives are present, so schedule
* napi poll handler */
if (stats->valid & 1)
napi_schedule(&ss->napi);
if (!mgp->msi_enabled && !mgp->msix_enabled) {
put_be32(0, mgp->irq_deassert);
if (!myri10ge_deassert_wait)
stats->valid = 0;
mb();
} else
stats->valid = 0;
/* Wait for IRQ line to go low, if using INTx */
i = 0;
while (1) {
i++;
/* check for transmit completes and receives */
send_done_count = ntohl(stats->send_done_count);
if (send_done_count != tx->pkt_done)
myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
netdev_warn(mgp->dev, "irq stuck?\n");
stats->valid = 0;
schedule_work(&mgp->watchdog_work);
}
if (likely(stats->valid == 0))
break;
cpu_relax();
barrier();
}
/* Only slice 0 updates stats */
if (ss == mgp->ss)
myri10ge_check_statblock(mgp);
put_be32(htonl(3), ss->irq_claim + 1);
return IRQ_HANDLED;
}
static int
myri10ge_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
char *ptr;
int i;
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.speed = SPEED_10000;
cmd->base.duplex = DUPLEX_FULL;
/*
* parse the product code to determine the interface type
* (CX4, XFP, Quad Ribbon Fiber) by looking at the character
* after the 3rd dash in the driver's cached copy of the
* EEPROM's product code string.
*/
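/* For instance, with a hypothetical product code of the form
 * "10G-PCIE-8B-S", the character after the third dash is 'S', which
 * is reported as a fibre port (SFP+); a leading '2' after that dash,
 * as in "...-2S"-style codes, is skipped first.
 */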
ptr = mgp->product_code_string;
if (ptr == NULL) {
netdev_err(netdev, "Missing product code\n");
return 0;
}
for (i = 0; i < 3; i++, ptr++) {
ptr = strchr(ptr, '-');
if (ptr == NULL) {
netdev_err(netdev, "Invalid product code %s\n",
mgp->product_code_string);
return 0;
}
}
if (*ptr == '2')
ptr++;
if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
/* We've found either an XFP, quad ribbon fiber, or SFP+ */
cmd->base.port = PORT_FIBRE;
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
} else {
cmd->base.port = PORT_OTHER;
}
return 0;
}
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
strscpy(info->driver, "myri10ge", sizeof(info->driver));
strscpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
strscpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int myri10ge_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
coal->rx_coalesce_usecs = mgp->intr_coal_delay;
return 0;
}
static int myri10ge_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
mgp->intr_coal_delay = coal->rx_coalesce_usecs;
put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
return 0;
}
static void
myri10ge_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
pause->autoneg = 0;
pause->rx_pause = mgp->pause;
pause->tx_pause = mgp->pause;
}
static int
myri10ge_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
if (pause->tx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->tx_pause);
if (pause->rx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->rx_pause);
if (pause->autoneg != 0)
return -EINVAL;
return 0;
}
static void
myri10ge_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
ring->rx_jumbo_max_pending = 0;
ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
ring->rx_mini_pending = ring->rx_mini_max_pending;
ring->rx_pending = ring->rx_max_pending;
ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
ring->tx_pending = ring->tx_max_pending;
}
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* device-specific stats */
"tx_boundary", "irq", "MSI", "MSIX",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
"dca_capable_firmware", "dca_device_present",
#endif
"link_changes", "link_up", "dropped_link_overflow",
"dropped_link_error_or_filtered",
"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
"dropped_unicast_filtered", "dropped_multicast_filtered",
"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
"dropped_no_big_buffer"
};
static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"----------- slice ---------",
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
};
#define MYRI10GE_NET_STATS_LEN 21
#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
int i;
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *myri10ge_gstrings_main_stats,
sizeof(myri10ge_gstrings_main_stats));
data += sizeof(myri10ge_gstrings_main_stats);
for (i = 0; i < mgp->num_slices; i++) {
memcpy(data, *myri10ge_gstrings_slice_stats,
sizeof(myri10ge_gstrings_slice_stats));
data += sizeof(myri10ge_gstrings_slice_stats);
}
break;
}
}
static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
return MYRI10GE_MAIN_STATS_LEN +
mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 * data)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
struct myri10ge_slice_state *ss;
struct rtnl_link_stats64 link_stats;
int slice;
int i;
/* force stats update */
memset(&link_stats, 0, sizeof(link_stats));
(void)myri10ge_get_stats(netdev, &link_stats);
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
data[i] = ((u64 *)&link_stats)[i];
data[i++] = (unsigned int)mgp->tx_boundary;
data[i++] = (unsigned int)mgp->pdev->irq;
data[i++] = (unsigned int)mgp->msi_enabled;
data[i++] = (unsigned int)mgp->msix_enabled;
data[i++] = (unsigned int)mgp->read_dma;
data[i++] = (unsigned int)mgp->write_dma;
data[i++] = (unsigned int)mgp->read_write_dma;
data[i++] = (unsigned int)mgp->serial_number;
data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
data[i++] = (unsigned int)mgp->link_changes;
/* firmware stats are useful only in the first slice */
ss = &mgp->ss[0];
data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
data[i++] =
(unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
data[i++] =
(unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
for (slice = 0; slice < mgp->num_slices; slice++) {
ss = &mgp->ss[slice];
data[i++] = slice;
data[i++] = (unsigned int)ss->tx.pkt_start;
data[i++] = (unsigned int)ss->tx.pkt_done;
data[i++] = (unsigned int)ss->tx.req;
data[i++] = (unsigned int)ss->tx.done;
data[i++] = (unsigned int)ss->rx_small.cnt;
data[i++] = (unsigned int)ss->rx_big.cnt;
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
}
}
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
mgp->msg_enable = value;
}
static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
return mgp->msg_enable;
}
/*
* Use a low-level command to change the LED behavior. Rather than
* blinking (which is the normal case), when identify is used, the
* yellow LED turns solid.
*/
static int myri10ge_led(struct myri10ge_priv *mgp, int on)
{
struct mcp_gen_header *hdr;
struct device *dev = &mgp->pdev->dev;
size_t hdr_off, pattern_off, hdr_len;
u32 pattern = 0xfffffffe;
/* find running firmware header */
hdr_off = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
if ((hdr_off & 3) || hdr_off + sizeof(*hdr) > mgp->sram_size) {
dev_err(dev, "Running firmware has bad header offset (%d)\n",
(int)hdr_off);
return -EIO;
}
hdr_len = swab32(readl(mgp->sram + hdr_off +
offsetof(struct mcp_gen_header, header_length)));
pattern_off = hdr_off + offsetof(struct mcp_gen_header, led_pattern);
if (pattern_off >= (hdr_len + hdr_off)) {
dev_info(dev, "Firmware does not support LED identification\n");
return -EINVAL;
}
if (!on)
pattern = swab32(readl(mgp->sram + pattern_off + 4));
writel(swab32(pattern), mgp->sram + pattern_off);
return 0;
}
static int
myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
int rc;
switch (state) {
case ETHTOOL_ID_ACTIVE:
rc = myri10ge_led(mgp, 1);
break;
case ETHTOOL_ID_INACTIVE:
rc = myri10ge_led(mgp, 0);
break;
default:
rc = -EINVAL;
}
return rc;
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
.set_coalesce = myri10ge_set_coalesce,
.get_pauseparam = myri10ge_get_pauseparam,
.set_pauseparam = myri10ge_set_pauseparam,
.get_ringparam = myri10ge_get_ringparam,
.get_link = ethtool_op_get_link,
.get_strings = myri10ge_get_strings,
.get_sset_count = myri10ge_get_sset_count,
.get_ethtool_stats = myri10ge_get_ethtool_stats,
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
.set_phys_id = myri10ge_phys_id,
.get_link_ksettings = myri10ge_get_link_ksettings,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
struct myri10ge_priv *mgp = ss->mgp;
struct myri10ge_cmd cmd;
struct net_device *dev = mgp->dev;
int tx_ring_size, rx_ring_size;
int tx_ring_entries, rx_ring_entries;
int i, slice, status;
size_t bytes;
/* get ring sizes */
slice = ss - mgp->ss;
cmd.data0 = slice;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
tx_ring_size = cmd.data0;
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
if (status != 0)
return status;
rx_ring_size = cmd.data0;
tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
ss->tx.mask = tx_ring_entries - 1;
ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
status = -ENOMEM;
/* allocate the host shadow rings */
bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
* sizeof(*ss->tx.req_list);
ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
if (ss->tx.req_bytes == NULL)
goto abort_with_nothing;
/* ensure req_list entries are aligned to 8 bytes */
ss->tx.req_list = (struct mcp_kreq_ether_send *)
ALIGN((unsigned long)ss->tx.req_bytes, 8);
ss->tx.queue_active = 0;
bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_small.shadow == NULL)
goto abort_with_tx_req_bytes;
bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_big.shadow == NULL)
goto abort_with_rx_small_shadow;
/* allocate the host info rings */
bytes = tx_ring_entries * sizeof(*ss->tx.info);
ss->tx.info = kzalloc(bytes, GFP_KERNEL);
if (ss->tx.info == NULL)
goto abort_with_rx_big_shadow;
bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_small.info == NULL)
goto abort_with_tx_info;
bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_big.info == NULL)
goto abort_with_rx_small_info;
/* Fill the receive rings */
ss->rx_big.cnt = 0;
ss->rx_small.cnt = 0;
ss->rx_big.fill_cnt = 0;
ss->rx_small.fill_cnt = 0;
ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
ss->rx_small.watchdog_needed = 0;
ss->rx_big.watchdog_needed = 0;
if (mgp->small_bytes == 0) {
ss->rx_small.fill_cnt = ss->rx_small.mask + 1;
} else {
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
}
if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
slice, ss->rx_small.fill_cnt);
goto abort_with_rx_small_ring;
}
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
slice, ss->rx_big.fill_cnt);
goto abort_with_rx_big_ring;
}
return 0;
abort_with_rx_big_ring:
for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
int idx = i & ss->rx_big.mask;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
put_page(ss->rx_big.info[idx].page);
}
abort_with_rx_small_ring:
if (mgp->small_bytes == 0)
ss->rx_small.fill_cnt = ss->rx_small.cnt;
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
int idx = i & ss->rx_small.mask;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
put_page(ss->rx_small.info[idx].page);
}
kfree(ss->rx_big.info);
abort_with_rx_small_info:
kfree(ss->rx_small.info);
abort_with_tx_info:
kfree(ss->tx.info);
abort_with_rx_big_shadow:
kfree(ss->rx_big.shadow);
abort_with_rx_small_shadow:
kfree(ss->rx_small.shadow);
abort_with_tx_req_bytes:
kfree(ss->tx.req_bytes);
ss->tx.req_bytes = NULL;
ss->tx.req_list = NULL;
abort_with_nothing:
return status;
}
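/* Undo myri10ge_allocate_rings() for one slice: unmap and release any
 * receive pages still owned by the host, drop any skbs left on the
 * transmit ring (counting them as tx_dropped), then free the shadow
 * and info arrays.
 */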
static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct myri10ge_tx_buf *tx;
int i, len, idx;
/* If not allocated, skip it */
if (ss->tx.req_list == NULL)
return;
for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
idx = i & ss->rx_big.mask;
if (i == ss->rx_big.fill_cnt - 1)
ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
put_page(ss->rx_big.info[idx].page);
}
if (mgp->small_bytes == 0)
ss->rx_small.fill_cnt = ss->rx_small.cnt;
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
idx = i & ss->rx_small.mask;
if (i == ss->rx_small.fill_cnt - 1)
ss->rx_small.info[idx].page_offset =
MYRI10GE_ALLOC_SIZE;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
put_page(ss->rx_small.info[idx].page);
}
tx = &ss->tx;
while (tx->done != tx->req) {
idx = tx->done & tx->mask;
skb = tx->info[idx].skb;
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
len = dma_unmap_len(&tx->info[idx], len);
dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
dma_unmap_single(&mgp->pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
} else {
if (len)
dma_unmap_page(&mgp->pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
}
}
kfree(ss->rx_big.info);
kfree(ss->rx_small.info);
kfree(ss->tx.info);
kfree(ss->rx_big.shadow);
kfree(ss->rx_small.shadow);
kfree(ss->tx.req_bytes);
ss->tx.req_bytes = NULL;
ss->tx.req_list = NULL;
}
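/* Interrupt setup policy: when the myri10ge_msi module parameter
 * allows it, a multi-slice device gets one MSI-X vector per slice and
 * a single-slice device gets plain MSI; otherwise (or if MSI cannot
 * be enabled) a shared legacy (xPIC) interrupt on the first slice is
 * used.
 */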
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
struct myri10ge_slice_state *ss;
struct net_device *netdev = mgp->dev;
int i;
int status;
mgp->msi_enabled = 0;
mgp->msix_enabled = 0;
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
status = pci_enable_msix_range(pdev, mgp->msix_vectors,
mgp->num_slices, mgp->num_slices);
if (status < 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
mgp->msix_enabled = 1;
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
if (status != 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI; falling back to xPIC\n",
status);
} else {
mgp->msi_enabled = 1;
}
}
}
if (mgp->msix_enabled) {
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
snprintf(ss->irq_desc, sizeof(ss->irq_desc),
"%s:slice-%d", netdev->name, i);
status = request_irq(mgp->msix_vectors[i].vector,
myri10ge_intr, 0, ss->irq_desc,
ss);
if (status != 0) {
dev_err(&pdev->dev,
"slice %d failed to allocate IRQ\n", i);
i--;
while (i >= 0) {
free_irq(mgp->msix_vectors[i].vector,
&mgp->ss[i]);
i--;
}
pci_disable_msix(pdev);
return status;
}
}
} else {
status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
mgp->dev->name, &mgp->ss[0]);
if (status != 0) {
dev_err(&pdev->dev, "failed to allocate IRQ\n");
if (mgp->msi_enabled)
pci_disable_msi(pdev);
}
}
return status;
}
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
int i;
if (mgp->msix_enabled) {
for (i = 0; i < mgp->num_slices; i++)
free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
} else {
free_irq(pdev->irq, &mgp->ss[0]);
}
if (mgp->msi_enabled)
pci_disable_msi(pdev);
if (mgp->msix_enabled)
pci_disable_msix(pdev);
}
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int status;
ss = &mgp->ss[slice];
status = 0;
if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
cmd.data0 = slice;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
&cmd, 0);
ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
(mgp->sram + cmd.data0);
}
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
&cmd, 0);
ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
ss->tx.send_go = (__iomem __be32 *)
(mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
ss->tx.send_stop = (__iomem __be32 *)
(mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
return status;
}
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int status;
ss = &mgp->ss[slice];
cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
if (status == -ENOSYS) {
dma_addr_t bus = ss->fw_stats_bus;
if (slice != 0)
return -EINVAL;
bus += offsetof(struct mcp_irq_data, send_done_count);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
status = myri10ge_send_cmd(mgp,
MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
&cmd, 0);
/* Firmware cannot support multicast without STATS_DMA_V2 */
mgp->fw_multicast_support = 0;
} else {
mgp->fw_multicast_support = 1;
}
return 0;
}
static int myri10ge_open(struct net_device *dev)
{
struct myri10ge_slice_state *ss;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int i, status, big_pow2, slice;
u8 __iomem *itable;
if (mgp->running != MYRI10GE_ETH_STOPPED)
return -EBUSY;
mgp->running = MYRI10GE_ETH_STARTING;
status = myri10ge_reset(mgp);
if (status != 0) {
netdev_err(dev, "failed reset\n");
goto abort_with_nothing;
}
if (mgp->num_slices > 1) {
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to set number of slices\n");
goto abort_with_nothing;
}
/* setup the indirection table */
cmd.data0 = mgp->num_slices;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
&cmd, 0);
status |= myri10ge_send_cmd(mgp,
MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to setup rss tables\n");
goto abort_with_nothing;
}
/* just enable an identity mapping */
itable = mgp->sram + cmd.data0;
for (i = 0; i < mgp->num_slices; i++)
__raw_writeb(i, &itable[i]);
cmd.data0 = 1;
cmd.data1 = myri10ge_rss_hash;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to enable slices\n");
goto abort_with_nothing;
}
}
status = myri10ge_request_irq(mgp);
if (status != 0)
goto abort_with_nothing;
/* decide what small buffer size to use. For good TCP rx
* performance, it is important to not receive 1514 byte
* frames into jumbo buffers, as it confuses the socket buffer
* accounting code, leading to drops and erratic performance.
*/
if (dev->mtu <= ETH_DATA_LEN)
/* enough for a TCP header */
mgp->small_bytes = (128 > SMP_CACHE_BYTES)
? (128 - MXGEFW_PAD)
: (SMP_CACHE_BYTES - MXGEFW_PAD);
else
/* enough for a vlan encapsulated ETH_DATA_LEN frame */
mgp->small_bytes = VLAN_ETH_FRAME_LEN;
/* Override the small buffer size? */
if (myri10ge_small_bytes >= 0)
mgp->small_bytes = myri10ge_small_bytes;
/* Firmware needs the big buff size as a power of 2. Lie and
* tell him the buffer is larger, because we only use 1
* buffer/pkt, and the mtu will prevent overruns.
*/
big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
while (!is_power_of_2(big_pow2))
big_pow2++;
mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
} else {
big_pow2 = MYRI10GE_ALLOC_SIZE;
mgp->big_bytes = big_pow2;
}
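/* Example: for a standard 1500-byte MTU, mtu + ETH_HLEN + VLAN_HLEN +
 * MXGEFW_PAD falls below MYRI10GE_ALLOC_SIZE / 2, so the firmware is
 * told the next power of two (2048, assuming the usual 2-byte
 * MXGEFW_PAD) while big_bytes keeps the exact length for host-side
 * allocation.
 */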
/* setup the per-slice data structures */
for (slice = 0; slice < mgp->num_slices; slice++) {
ss = &mgp->ss[slice];
status = myri10ge_get_txrx(mgp, slice);
if (status != 0) {
netdev_err(dev, "failed to get ring sizes or locations\n");
goto abort_with_rings;
}
status = myri10ge_allocate_rings(ss);
if (status != 0)
goto abort_with_rings;
/* only firmware which supports multiple TX queues
* supports setting up the tx stats on non-zero
* slices */
if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
status = myri10ge_set_stats(mgp, slice);
if (status) {
netdev_err(dev, "Couldn't set stats DMA\n");
goto abort_with_rings;
}
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
/* now give firmware buffers sizes, and MTU */
cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
cmd.data0 = mgp->small_bytes;
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
cmd.data0 = big_pow2;
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
if (status) {
netdev_err(dev, "Couldn't set buffer sizes\n");
goto abort_with_rings;
}
/*
* Set Linux style TSO mode; this is needed only on newer
* firmware versions. Older versions default to Linux
* style TSO
*/
cmd.data0 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
if (status && status != -ENOSYS) {
netdev_err(dev, "Couldn't set TSO mode\n");
goto abort_with_rings;
}
mgp->link_state = ~0U;
mgp->rdma_tags_available = 15;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
if (status) {
netdev_err(dev, "Couldn't bring up link\n");
goto abort_with_rings;
}
mgp->running = MYRI10GE_ETH_RUNNING;
mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
add_timer(&mgp->watchdog_timer);
netif_tx_wake_all_queues(dev);
return 0;
abort_with_rings:
while (slice) {
slice--;
napi_disable(&mgp->ss[slice].napi);
}
for (i = 0; i < mgp->num_slices; i++)
myri10ge_free_rings(&mgp->ss[i]);
myri10ge_free_irq(mgp);
abort_with_nothing:
mgp->running = MYRI10GE_ETH_STOPPED;
return -ENOMEM;
}
static int myri10ge_close(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int status, old_down_cnt;
int i;
if (mgp->running != MYRI10GE_ETH_RUNNING)
return 0;
if (mgp->ss[0].tx.req_bytes == NULL)
return 0;
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
for (i = 0; i < mgp->num_slices; i++)
napi_disable(&mgp->ss[i].napi);
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
if (mgp->rebooted == 0) {
old_down_cnt = mgp->down_cnt;
mb();
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
netdev_err(dev, "Couldn't bring down link\n");
wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
HZ);
if (old_down_cnt == mgp->down_cnt)
netdev_err(dev, "never got down irq\n");
}
netif_tx_disable(dev);
myri10ge_free_irq(mgp);
for (i = 0; i < mgp->num_slices; i++)
myri10ge_free_rings(&mgp->ss[i]);
mgp->running = MYRI10GE_ETH_STOPPED;
return 0;
}
/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
* backwards one at a time and handle ring wraps */
static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
struct mcp_kreq_ether_send *src, int cnt)
{
int idx, starting_slot;
starting_slot = tx->req;
while (cnt > 1) {
cnt--;
idx = (starting_slot + cnt) & tx->mask;
myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
mb();
}
}
/*
* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
* at most 32 bytes at a time, so as to avoid involving the software
* pio handler in the nic. We re-write the first segment's flags
* to mark them valid only after writing the entire chain.
*/
static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
int cnt)
{
int idx, i;
struct mcp_kreq_ether_send __iomem *dstp, *dst;
struct mcp_kreq_ether_send *srcp;
u8 last_flags;
idx = tx->req & tx->mask;
last_flags = src->flags;
src->flags = 0;
mb();
dst = dstp = &tx->lanai[idx];
srcp = src;
if ((idx + cnt) < tx->mask) {
for (i = 0; i < (cnt - 1); i += 2) {
myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
mb(); /* force write every 32 bytes */
srcp += 2;
dstp += 2;
}
} else {
/* submit all but the first request, and ensure
* that it is submitted below */
myri10ge_submit_req_backwards(tx, src, cnt);
i = 0;
}
if (i < cnt) {
/* submit the first request */
myri10ge_pio_copy(dstp, srcp, sizeof(*src));
mb(); /* barrier before setting valid flag */
}
/* re-write the last 32-bits with the valid flags */
src->flags = last_flags;
put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
tx->req += cnt;
mb();
}
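/* Unwind the DMA mappings made for a partially built send request,
 * walking from the packet's first descriptor (tx->req) through the
 * most recently mapped one; used on mapping failures and before
 * falling back to skb_linearize().
 */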
static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
struct myri10ge_tx_buf *tx, int idx)
{
unsigned int len;
int last_idx;
/* Free any DMA resources we've alloced and clear out the skb slot */
last_idx = (idx + 1) & tx->mask;
idx = tx->req & tx->mask;
do {
len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
dma_unmap_single(&mgp->pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
else
dma_unmap_page(&mgp->pdev->dev,
dma_unmap_addr(&tx->info[idx],
bus), len,
DMA_TO_DEVICE);
dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
}
/*
 * Transmit a packet. We need to split the packet so that a single
 * segment does not cross myri10ge->tx_boundary, which makes segment
 * counting tricky. So rather than trying to count segments up front,
 * we simply give up if fewer transmit descriptors are currently
 * available than a reasonably fragmented packet could need. If we run
 * out of segments while preparing a packet for DMA, we just linearize
 * it and try again.
 */
static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
struct myri10ge_tx_buf *tx;
skb_frag_t *frag;
struct netdev_queue *netdev_queue;
dma_addr_t bus;
u32 low;
__be32 high_swapped;
unsigned int len;
int idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
u16 pseudo_hdr_offset, cksum_offset, queue;
int cum_len, seglen, boundary, rdma_count;
u8 flags, odd_flag;
queue = skb_get_queue_mapping(skb);
ss = &mgp->ss[queue];
netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
tx = &ss->tx;
again:
req = tx->req_list;
avail = tx->mask - 1 - (tx->req - tx->done);
mss = 0;
max_segments = MXGEFW_MAX_SEND_DESC;
if (skb_is_gso(skb)) {
mss = skb_shinfo(skb)->gso_size;
max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
}
if ((unlikely(avail < max_segments))) {
/* we are out of transmit resources */
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
return NETDEV_TX_BUSY;
}
/* Setup checksum offloading, if needed */
cksum_offset = 0;
pseudo_hdr_offset = 0;
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
cksum_offset = skb_checksum_start_offset(skb);
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
if (unlikely(!mss && (cksum_offset > 255 ||
pseudo_hdr_offset > 127))) {
if (skb_checksum_help(skb))
goto drop;
cksum_offset = 0;
pseudo_hdr_offset = 0;
} else {
odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
flags |= MXGEFW_FLAGS_CKSUM;
}
}
cum_len = 0;
if (mss) { /* TSO */
/* this removes any CKSUM flag from before */
flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
/* negative cum_len signifies to the
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
cum_len = -skb_tcp_all_headers(skb);
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
* the need to parse the headers */
if (skb_is_gso_v6(skb)) {
cksum_offset = tcp_hdrlen(skb);
/* Can only handle headers <= max_tso6 long */
if (unlikely(-cum_len > mgp->max_tso6))
return myri10ge_sw_tso(skb, dev);
}
/* for TSO, pseudo_hdr_offset holds mss.
* The firmware figures out where to put
* the checksum by parsing the header. */
pseudo_hdr_offset = mss;
} else
/* Mark small packets, and pad out tiny packets */
if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
flags |= MXGEFW_FLAGS_SMALL;
/* pad frames to at least ETH_ZLEN bytes */
if (eth_skb_pad(skb)) {
/* The packet is gone, so we must
* return 0 */
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
}
/* map the skb for DMA */
len = skb_headlen(skb);
bus = dma_map_single(&mgp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus)))
goto drop;
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
dma_unmap_addr_set(&tx->info[idx], bus, bus);
dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
count = 0;
rdma_count = 0;
/* "rdma_count" is the number of RDMAs belonging to the
* current packet BEFORE the current send request. For
* non-TSO packets, this is equal to "count".
* For TSO packets, rdma_count needs to be reset
* to 0 after a segment cut.
*
* The rdma_count field of the send request is
* the number of RDMAs of the packet starting at
* that request. For TSO send requests with one or more cuts
* in the middle, this is the number of RDMAs starting
* after the last cut in the request. All previous
* segments before the last cut implicitly have 1 RDMA.
*
* Since the number of RDMAs is not known beforehand,
* it must be filled-in retroactively - after each
* segmentation cut or at the end of the entire packet.
*/
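/* Example: when the payload accumulated for the current TSO segment
 * crosses mss, "chop" sets TSO_CHOP on this descriptor and rdma_count
 * is restarted; if the segment boundary lands exactly at the end of
 * the descriptor's data, the next descriptor is additionally marked
 * FIRST.
 */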
while (1) {
/* Break the SKB or Fragment up into pieces which
* do not cross mgp->tx_boundary */
low = MYRI10GE_LOWPART_TO_U32(bus);
high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
while (len) {
u8 flags_next;
int cum_len_next;
if (unlikely(count == max_segments))
goto abort_linearize;
boundary =
(low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
seglen = boundary - low;
if (seglen > len)
seglen = len;
flags_next = flags & ~MXGEFW_FLAGS_FIRST;
cum_len_next = cum_len + seglen;
if (mss) { /* TSO */
(req - rdma_count)->rdma_count = rdma_count + 1;
if (likely(cum_len >= 0)) { /* payload */
int next_is_first, chop;
chop = (cum_len_next > mss);
cum_len_next = cum_len_next % mss;
next_is_first = (cum_len_next == 0);
flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
flags_next |= next_is_first *
MXGEFW_FLAGS_FIRST;
rdma_count |= -(chop | next_is_first);
rdma_count += chop & ~next_is_first;
} else if (likely(cum_len_next >= 0)) { /* header ends */
int small;
rdma_count = -1;
cum_len_next = 0;
seglen = -cum_len;
small = (mss <= MXGEFW_SEND_SMALL_SIZE);
flags_next = MXGEFW_FLAGS_TSO_PLD |
MXGEFW_FLAGS_FIRST |
(small * MXGEFW_FLAGS_SMALL);
}
}
req->addr_high = high_swapped;
req->addr_low = htonl(low);
req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
req->pad = 0; /* complete solid 16-byte block; does this matter? */
req->rdma_count = 1;
req->length = htons(seglen);
req->cksum_offset = cksum_offset;
req->flags = flags | ((cum_len & 1) * odd_flag);
low += seglen;
len -= seglen;
cum_len = cum_len_next;
flags = flags_next;
req++;
count++;
rdma_count++;
if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
if (unlikely(cksum_offset > seglen))
cksum_offset -= seglen;
else
cksum_offset = 0;
}
}
if (frag_idx == frag_cnt)
break;
/* map next fragment for DMA */
frag = &skb_shinfo(skb)->frags[frag_idx];
frag_idx++;
len = skb_frag_size(frag);
bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) {
myri10ge_unmap_tx_dma(mgp, tx, idx);
goto drop;
}
idx = (count + tx->req) & tx->mask;
dma_unmap_addr_set(&tx->info[idx], bus, bus);
dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
if (mss)
do {
req--;
req->flags |= MXGEFW_FLAGS_TSO_LAST;
} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
MXGEFW_FLAGS_FIRST)));
idx = ((count - 1) + tx->req) & tx->mask;
tx->info[idx].last = 1;
myri10ge_submit_req(tx, tx->req_list, count);
/* if using multiple tx queues, make sure NIC polls the
* current slice */
if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
mb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
}
return NETDEV_TX_OK;
abort_linearize:
myri10ge_unmap_tx_dma(mgp, tx, idx);
if (skb_is_gso(skb)) {
netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
goto drop;
}
if (skb_linearize(skb))
goto drop;
tx->linearized++;
goto again;
drop:
dev_kfree_skb_any(skb);
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
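/* Software GSO fallback: segment the skb on the host (used when an
 * IPv6 TSO header is longer than the firmware's max_tso6 limit) and
 * push each resulting segment through myri10ge_xmit() individually.
 */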
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *segs, *curr, *next;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
netdev_tx_t status;
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
if (IS_ERR(segs))
goto drop;
skb_list_walk_safe(segs, curr, next) {
skb_mark_not_on_list(curr);
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
skb_list_walk_safe(next, curr, next) {
curr->next = NULL;
dev_kfree_skb_any(curr);
}
goto drop;
}
}
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
drop:
ss = &mgp->ss[skb_get_queue_mapping(skb)];
dev_kfree_skb_any(skb);
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
static void myri10ge_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
const struct myri10ge_priv *mgp = netdev_priv(dev);
const struct myri10ge_slice_netstats *slice_stats;
int i;
for (i = 0; i < mgp->num_slices; i++) {
slice_stats = &mgp->ss[i].stats;
stats->rx_packets += slice_stats->rx_packets;
stats->tx_packets += slice_stats->tx_packets;
stats->rx_bytes += slice_stats->rx_bytes;
stats->tx_bytes += slice_stats->tx_bytes;
stats->rx_dropped += slice_stats->rx_dropped;
stats->tx_dropped += slice_stats->tx_dropped;
}
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
/* can be called from atomic contexts,
* pass 1 to force atomicity in myri10ge_send_cmd() */
myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
/* This firmware is known to not support multicast */
if (!mgp->fw_multicast_support)
return;
/* Disable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
err);
goto abort;
}
if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
/* request to disable multicast filtering, so quit here */
return;
}
/* Flush the filters */
err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
&cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
err);
goto abort;
}
/* Walk the multicast list, and add each address */
netdev_for_each_mc_addr(ha, dev) {
memcpy(data, &ha->addr, ETH_ALEN);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
&cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
err, ha->addr);
goto abort;
}
}
/* Enable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
err);
goto abort;
}
return;
abort:
return;
}
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
struct myri10ge_priv *mgp = netdev_priv(dev);
int status;
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
status = myri10ge_update_mac_address(mgp, sa->sa_data);
if (status != 0) {
netdev_err(dev, "changing mac address failed with %d\n",
status);
return status;
}
/* change the dev structure */
eth_hw_addr_set(dev, sa->sa_data);
return 0;
}
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
dev->mtu = new_mtu;
myri10ge_open(dev);
} else
dev->mtu = new_mtu;
return 0;
}
/*
* Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
* Only do it if the bridge is a root port since we don't want to disturb
* any other device, except if forced with myri10ge_ecrc_enable > 1.
*/
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
struct pci_dev *bridge = mgp->pdev->bus->self;
struct device *dev = &mgp->pdev->dev;
int cap;
unsigned err_cap;
int ret;
if (!myri10ge_ecrc_enable || !bridge)
return;
/* check that the bridge is a root port */
if (pci_pcie_type(bridge) != PCI_EXP_TYPE_ROOT_PORT) {
if (myri10ge_ecrc_enable > 1) {
struct pci_dev *prev_bridge, *old_bridge = bridge;
/* Walk the hierarchy up to the root port
* where ECRC has to be enabled */
do {
prev_bridge = bridge;
bridge = bridge->bus->self;
if (!bridge || prev_bridge == bridge) {
dev_err(dev,
"Failed to find root port"
" to force ECRC\n");
return;
}
} while (pci_pcie_type(bridge) !=
PCI_EXP_TYPE_ROOT_PORT);
dev_info(dev,
"Forcing ECRC on non-root port %s"
" (enabling on root port %s)\n",
pci_name(old_bridge), pci_name(bridge));
} else {
dev_err(dev,
"Not enabling ECRC on non-root port %s\n",
pci_name(bridge));
return;
}
}
cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
if (!cap)
return;
ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
if (ret) {
dev_err(dev, "failed reading ext-conf-space of %s\n",
pci_name(bridge));
dev_err(dev, "\t pci=nommconf in use? "
"or buggy/incomplete/absent ACPI MCFG attr?\n");
return;
}
if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
return;
err_cap |= PCI_ERR_CAP_ECRC_GENE;
pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}
/*
* The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
* when the PCI-E Completion packets are aligned on an 8-byte
* boundary. Some PCI-E chip sets always align Completion packets; on
* the ones that do not, the alignment can be enforced by enabling
* ECRC generation (if supported).
*
* When PCI-E Completion packets are not aligned, it is actually more
* efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
*
* If the driver can neither enable ECRC nor verify that it has
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
* should also ensure that it never gives the device a Read-DMA which is
* larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
* firmware image, and set tx_boundary to 4KB.
*/
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
struct device *dev = &pdev->dev;
int status;
mgp->tx_boundary = 4096;
/*
* Verify the max read request size was set to 4KB
* before trying the test with 4KB.
*/
status = pcie_get_readrq(pdev);
if (status < 0) {
dev_err(dev, "Couldn't read max read req size: %d\n", status);
goto abort;
}
if (status != 4096) {
dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
mgp->tx_boundary = 2048;
}
/*
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
set_fw_name(mgp, myri10ge_fw_aligned, false);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
goto abort;
}
/*
* Enable ECRC if possible
*/
myri10ge_enable_ecrc(mgp);
/*
* Run a DMA test which watches for unaligned completions and
* aborts on the first one seen.
*/
status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
if (status == 0)
return; /* keep the aligned firmware */
if (status != -E2BIG)
dev_warn(dev, "DMA test failed: %d\n", status);
if (status == -ENOSYS)
dev_warn(dev, "Falling back to ethp! "
"Please install up to date fw\n");
abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
int overridden = 0;
if (myri10ge_force_firmware == 0) {
int link_width;
u16 lnk;
pcie_capability_read_word(mgp->pdev, PCI_EXP_LNKSTA, &lnk);
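/* Bits 9:4 of PCI_EXP_LNKSTA hold the negotiated link width. */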
link_width = (lnk >> 4) & 0x3f;
/* Check to see if Link is less than 8 or if the
* upstream bridge is known to provide aligned
* completions */
if (link_width < 8) {
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
mgp->tx_boundary = 4096;
set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
myri10ge_firmware_probe(mgp);
}
} else {
if (myri10ge_force_firmware == 1) {
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
mgp->tx_boundary = 4096;
set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
mgp->tx_boundary = 2048;
set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
}
kernel_param_lock(THIS_MODULE);
if (myri10ge_fw_name != NULL) {
char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
if (fw_name) {
overridden = 1;
set_fw_name(mgp, fw_name, true);
}
}
kernel_param_unlock(THIS_MODULE);
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
strlen(myri10ge_fw_names[mgp->board_number])) {
set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
overridden = 1;
}
if (overridden)
dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
mgp->fw_name);
}
static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
{
struct pci_dev *bridge = pdev->bus->self;
int cap;
u32 mask;
if (bridge == NULL)
return;
cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
if (cap) {
/* a sram parity error can cause a surprise link
* down; since we expect and can recover from sram
* parity errors, mask surprise link down events */
pci_read_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, &mask);
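/* 0x20 is the Surprise Down Error bit (PCI_ERR_UNC_SURPDN). */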
mask |= 0x20;
pci_write_config_dword(bridge, cap + PCI_ERR_UNCOR_MASK, mask);
}
}
static int __maybe_unused myri10ge_suspend(struct device *dev)
{
struct myri10ge_priv *mgp;
struct net_device *netdev;
mgp = dev_get_drvdata(dev);
if (mgp == NULL)
return -EINVAL;
netdev = mgp->dev;
netif_device_detach(netdev);
if (netif_running(netdev)) {
netdev_info(netdev, "closing\n");
rtnl_lock();
myri10ge_close(netdev);
rtnl_unlock();
}
myri10ge_dummy_rdma(mgp, 0);
return 0;
}
static int __maybe_unused myri10ge_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct myri10ge_priv *mgp;
struct net_device *netdev;
int status;
u16 vendor;
mgp = pci_get_drvdata(pdev);
if (mgp == NULL)
return -EINVAL;
netdev = mgp->dev;
msleep(5); /* give card time to respond */
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
netdev_err(mgp->dev, "device disappeared!\n");
return -EIO;
}
myri10ge_reset(mgp);
myri10ge_dummy_rdma(mgp, 1);
if (netif_running(netdev)) {
rtnl_lock();
status = myri10ge_open(netdev);
rtnl_unlock();
if (status != 0)
goto abort_with_enabled;
}
netif_device_attach(netdev);
return 0;
abort_with_enabled:
return -EIO;
}
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
int vs = mgp->vendor_specific_offset;
u32 reboot;
/* enter read32 mode */
pci_write_config_byte(pdev, vs + 0x10, 0x3);
/* read REBOOT_STATUS (0xfffffff0) */
pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
pci_read_config_dword(pdev, vs + 0x14, &reboot);
return reboot;
}
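/* A slice is suspected of being stuck when it still has pending
 * transmits (tx.req != tx.done) but tx.done has not advanced since
 * the previous watchdog pass; if the pause-frame count rose in the
 * same interval, the stall is blamed on the link partner instead.
 */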
static void
myri10ge_check_slice(struct myri10ge_slice_state *ss, int *reset_needed,
int *busy_slice_cnt, u32 rx_pause_cnt)
{
struct myri10ge_priv *mgp = ss->mgp;
int slice = ss - mgp->ss;
if (ss->tx.req != ss->tx.done &&
ss->tx.done == ss->watchdog_tx_done &&
ss->watchdog_tx_req != ss->watchdog_tx_done) {
/* nic seems like it might be stuck.. */
if (rx_pause_cnt != mgp->watchdog_pause) {
if (net_ratelimit())
netdev_warn(mgp->dev, "slice %d: TX paused, "
"check link partner\n", slice);
} else {
netdev_warn(mgp->dev,
"slice %d: TX stuck %d %d %d %d %d %d\n",
slice, ss->tx.queue_active, ss->tx.req,
ss->tx.done, ss->tx.pkt_start,
ss->tx.pkt_done,
(int)ntohl(mgp->ss[slice].fw_stats->
send_done_count));
*reset_needed = 1;
ss->stuck = 1;
}
}
if (ss->watchdog_tx_done != ss->tx.done ||
ss->watchdog_rx_done != ss->rx_done.cnt) {
*busy_slice_cnt += 1;
}
ss->watchdog_tx_done = ss->tx.done;
ss->watchdog_tx_req = ss->tx.req;
ss->watchdog_rx_done = ss->rx_done.cnt;
}
/*
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
static void myri10ge_watchdog(struct work_struct *work)
{
struct myri10ge_priv *mgp =
container_of(work, struct myri10ge_priv, watchdog_work);
struct myri10ge_slice_state *ss;
u32 reboot, rx_pause_cnt;
int status, rebooted;
int i;
int reset_needed = 0;
int busy_slice_cnt = 0;
u16 cmd, vendor;
mgp->watchdog_resets++;
pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
rebooted = 0;
if ((cmd & PCI_COMMAND_MASTER) == 0) {
/* Bus master DMA disabled? Check to see
 * if the card rebooted due to a parity error.
 * For now, just report it. */
reboot = myri10ge_read_reboot(mgp);
netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
reboot, myri10ge_reset_recover ? "" : " not");
if (myri10ge_reset_recover == 0)
return;
rtnl_lock();
mgp->rebooted = 1;
rebooted = 1;
myri10ge_close(mgp->dev);
myri10ge_reset_recover--;
mgp->rebooted = 0;
/*
* A rebooted nic will come back with config space as
* it was after power was applied to PCIe bus.
* Attempt to restore config space which was saved
* when the driver was loaded, or the last time the
* nic was resumed from power saving mode.
*/
pci_restore_state(mgp->pdev);
/* save state again for accounting reasons */
pci_save_state(mgp->pdev);
} else {
/* if we get back -1's from our slot, perhaps somebody
* powered off our card. Don't try to reset it in
* this case */
if (cmd == 0xffff) {
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
netdev_err(mgp->dev, "device disappeared!\n");
return;
}
}
/* Perhaps it is a software error. See if stuck slice
* has recovered, reset if not */
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
if (ss->stuck) {
myri10ge_check_slice(ss, &reset_needed,
&busy_slice_cnt,
rx_pause_cnt);
ss->stuck = 0;
}
}
if (!reset_needed) {
netdev_dbg(mgp->dev, "not resetting\n");
return;
}
netdev_err(mgp->dev, "device timeout, resetting\n");
}
if (!rebooted) {
rtnl_lock();
myri10ge_close(mgp->dev);
}
status = myri10ge_load_firmware(mgp, 1);
if (status != 0)
netdev_err(mgp->dev, "failed to load firmware\n");
else
myri10ge_open(mgp->dev);
rtnl_unlock();
}
/*
* We use our own timer routine rather than relying upon
* netdev->tx_timeout because we have a very large hardware transmit
* queue. Due to the large queue, the netdev->tx_timeout function
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
static void myri10ge_watchdog_timer(struct timer_list *t)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
int i, reset_needed, busy_slice_cnt;
u32 rx_pause_cnt;
u16 cmd;
mgp = from_timer(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
for (i = 0, reset_needed = 0;
i < mgp->num_slices && reset_needed == 0; ++i) {
ss = &mgp->ss[i];
if (ss->rx_small.watchdog_needed) {
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD,
1);
if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
myri10ge_fill_thresh)
ss->rx_small.watchdog_needed = 0;
}
if (ss->rx_big.watchdog_needed) {
myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
mgp->big_bytes, 1);
if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
myri10ge_fill_thresh)
ss->rx_big.watchdog_needed = 0;
}
myri10ge_check_slice(ss, &reset_needed, &busy_slice_cnt,
rx_pause_cnt);
}
/* if we've sent or received no traffic, poll the NIC to
* ensure it is still there. Otherwise, we risk not noticing
* an error in a timely fashion */
if (busy_slice_cnt == 0) {
pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
if ((cmd & PCI_COMMAND_MASTER) == 0) {
reset_needed = 1;
}
}
mgp->watchdog_pause = rx_pause_cnt;
if (reset_needed) {
schedule_work(&mgp->watchdog_work);
} else {
/* rearm timer */
mod_timer(&mgp->watchdog_timer,
jiffies + myri10ge_watchdog_timeout * HZ);
}
}
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_slice_state *ss;
struct pci_dev *pdev = mgp->pdev;
size_t bytes;
int i;
if (mgp->ss == NULL)
return;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
if (ss->rx_done.entry != NULL) {
bytes = mgp->max_intr_slots *
sizeof(*ss->rx_done.entry);
dma_free_coherent(&pdev->dev, bytes,
ss->rx_done.entry, ss->rx_done.bus);
ss->rx_done.entry = NULL;
}
if (ss->fw_stats != NULL) {
bytes = sizeof(*ss->fw_stats);
dma_free_coherent(&pdev->dev, bytes,
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
__netif_napi_del(&ss->napi);
}
/* Wait till napi structs are no longer used, and then free ss. */
synchronize_net();
kfree(mgp->ss);
mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_slice_state *ss;
struct pci_dev *pdev = mgp->pdev;
size_t bytes;
int i;
bytes = sizeof(*mgp->ss) * mgp->num_slices;
mgp->ss = kzalloc(bytes, GFP_KERNEL);
if (mgp->ss == NULL) {
return -ENOMEM;
}
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus,
GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus,
GFP_KERNEL);
if (ss->fw_stats == NULL)
goto abort;
ss->mgp = mgp;
ss->dev = mgp->dev;
netif_napi_add_weight(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
}
return 0;
abort:
myri10ge_free_slices(mgp);
return -ENOMEM;
}
/*
 * This function determines the number of slices supported.
 * The number of slices is the minimum of the number of CPUs,
 * the number of MSI-X irqs supported, and the number of slices
 * supported by the firmware.
 */
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
bool old_allocated;
int i, status, ncpus;
mgp->num_slices = 1;
ncpus = netif_get_num_default_rss_queues();
if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
(myri10ge_max_slices == -1 && ncpus < 2))
return;
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
old_allocated = mgp->fw_name_allocated;
/* don't free old_fw if we override it. */
mgp->fw_name_allocated = false;
if (myri10ge_fw_name != NULL) {
dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
myri10ge_fw_name);
set_fw_name(mgp, myri10ge_fw_name, false);
} else if (old_fw == myri10ge_fw_aligned)
set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
else
set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
status = myri10ge_load_firmware(mgp, 0);
if (status != 0) {
dev_info(&pdev->dev, "Rss firmware not found\n");
if (old_allocated)
kfree(old_fw);
return;
}
/* hit the board with a reset to ensure it is alive */
memset(&cmd, 0, sizeof(cmd));
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed reset\n");
goto abort_with_fw;
}
mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
/* tell it the size of the interrupt queues */
cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
goto abort_with_fw;
}
/* ask the maximum number of slices it supports */
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
if (status != 0)
goto abort_with_fw;
else
mgp->num_slices = cmd.data0;
/* Only allow multiple slices if MSI-X is usable */
if (!myri10ge_msi) {
goto abort_with_fw;
}
/* if the admin did not specify a limit to how many
* slices we should use, cap it automatically to the
* number of CPUs currently online */
if (myri10ge_max_slices == -1)
myri10ge_max_slices = ncpus;
if (mgp->num_slices > myri10ge_max_slices)
mgp->num_slices = myri10ge_max_slices;
/* Now try to allocate as many MSI-X vectors as we have
* slices. We give up on MSI-X if we can only get a single
* vector. */
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
if (mgp->num_slices == 1)
goto no_msix;
status = pci_enable_msix_range(pdev,
mgp->msix_vectors,
mgp->num_slices,
mgp->num_slices);
if (status < 0)
goto no_msix;
pci_disable_msix(pdev);
if (status == mgp->num_slices) {
if (old_allocated)
kfree(old_fw);
return;
} else {
mgp->num_slices = status;
}
}
no_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
}
abort_with_fw:
mgp->num_slices = 1;
set_fw_name(mgp, old_fw, old_allocated);
myri10ge_load_firmware(mgp, 0);
}
static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_open = myri10ge_open,
.ndo_stop = myri10ge_close,
.ndo_start_xmit = myri10ge_xmit,
.ndo_get_stats64 = myri10ge_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
int status = -ENXIO;
unsigned hdr_offset, ss_offset;
static int board_number;
netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
if (netdev == NULL)
return -ENOMEM;
SET_NETDEV_DEV(netdev, &pdev->dev);
mgp = netdev_priv(netdev);
mgp->dev = netdev;
mgp->pdev = pdev;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
mgp->board_number = board_number;
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "pci_enable_device call failed\n");
status = -ENODEV;
goto abort_with_netdev;
}
/* Find the vendor-specific cap so we can check
* the reboot register later on */
mgp->vendor_specific_offset
= pci_find_capability(pdev, PCI_CAP_ID_VNDR);
/* Set our max read request to 4KB */
status = pcie_set_readrq(pdev, 4096);
if (status != 0) {
dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
status);
goto abort_with_enabled;
}
myri10ge_mask_surprise_down(pdev);
pci_set_master(pdev);
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (status != 0) {
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
if (!mgp->cmd) {
status = -ENOMEM;
goto abort_with_enabled;
}
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
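/* Map BAR 0 (the board's SRAM) with write-combining so the PIO-copied
 * send requests in the transmit path can be burst to the NIC.
 */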
mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
mgp->board_span, mgp->iomem_base);
status = -ENXIO;
goto abort_with_mtrr;
}
hdr_offset =
swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
if (mgp->sram_size > mgp->board_span ||
mgp->sram_size <= MYRI10GE_FW_OFFSET) {
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
status = -EINVAL;
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
status = myri10ge_read_mac_addr(mgp);
if (status)
goto abort_with_ioremap;
eth_hw_addr_set(netdev, mgp->mac_addr);
myri10ge_select_firmware(mgp);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
dev_err(&pdev->dev, "failed to load firmware\n");
goto abort_with_ioremap;
}
myri10ge_probe_slices(mgp);
status = myri10ge_alloc_slices(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed to alloc slice state\n");
goto abort_with_firmware;
}
netif_set_real_num_tx_queues(netdev, mgp->num_slices);
netif_set_real_num_rx_queues(netdev, mgp->num_slices);
status = myri10ge_reset(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed reset\n");
goto abort_with_slices;
}
#ifdef CONFIG_MYRI10GE_DCA
myri10ge_setup_dca(mgp);
#endif
pci_set_drvdata(pdev, mgp);
/* MTU range: 68 - 9000 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
if (myri10ge_initial_mtu > netdev->max_mtu)
myri10ge_initial_mtu = netdev->max_mtu;
if (myri10ge_initial_mtu < netdev->min_mtu)
myri10ge_initial_mtu = netdev->min_mtu;
netdev->mtu = myri10ge_initial_mtu;
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
/* fake NETIF_F_HW_VLAN_CTAG_RX for good GRO performance */
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
netdev->vlan_features &= ~NETIF_F_TSO6;
if (mgp->fw_ver_tiny < 32)
netdev->vlan_features &= ~NETIF_F_TSO;
/* make sure we can get an irq, and that MSI can be
* setup (if available). */
status = myri10ge_request_irq(mgp);
if (status != 0)
goto abort_with_slices;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
* nic resets due to a parity error */
pci_save_state(pdev);
/* Setup the watchdog timer */
timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
goto abort_with_state;
}
if (mgp->msix_enabled)
dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
else
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->msi_enabled ? "MSI" : "xPIC",
pdev->irq, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
board_number++;
return 0;
abort_with_state:
pci_restore_state(pdev);
abort_with_slices:
myri10ge_free_slices(mgp);
abort_with_firmware:
kfree(mgp->msix_vectors);
myri10ge_dummy_rdma(mgp, 0);
abort_with_ioremap:
if (mgp->mac_addr_string != NULL)
dev_err(&pdev->dev,
"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
mgp->mac_addr_string, mgp->serial_number);
iounmap(mgp->sram);
abort_with_mtrr:
arch_phys_wc_del(mgp->wc_cookie);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
/*
* myri10ge_remove
*
* Does what is necessary to shutdown one Myrinet device. Called
* once for each Myrinet card by the kernel when a module is
* unloaded.
*/
static void myri10ge_remove(struct pci_dev *pdev)
{
struct myri10ge_priv *mgp;
struct net_device *netdev;
mgp = pci_get_drvdata(pdev);
if (mgp == NULL)
return;
cancel_work_sync(&mgp->watchdog_work);
netdev = mgp->dev;
unregister_netdev(netdev);
#ifdef CONFIG_MYRI10GE_DCA
myri10ge_teardown_dca(mgp);
#endif
myri10ge_dummy_rdma(mgp, 0);
/* avoid a memory leak */
pci_restore_state(pdev);
iounmap(mgp->sram);
arch_phys_wc_del(mgp->wc_cookie);
myri10ge_free_slices(mgp);
kfree(mgp->msix_vectors);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
static const struct pci_device_id myri10ge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
{0},
};
MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
static SIMPLE_DEV_PM_OPS(myri10ge_pm_ops, myri10ge_suspend, myri10ge_resume);
static struct pci_driver myri10ge_driver = {
.name = "myri10ge",
.probe = myri10ge_probe,
.remove = myri10ge_remove,
.id_table = myri10ge_pci_tbl,
.driver.pm = &myri10ge_pm_ops,
};
#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
int err = driver_for_each_device(&myri10ge_driver.driver,
NULL, &event,
myri10ge_notify_dca_device);
if (err)
return NOTIFY_BAD;
return NOTIFY_DONE;
}
static struct notifier_block myri10ge_dca_notifier = {
.notifier_call = myri10ge_notify_dca,
.next = NULL,
.priority = 0,
};
#endif /* CONFIG_MYRI10GE_DCA */
static __init int myri10ge_init_module(void)
{
pr_info("Version %s\n", MYRI10GE_VERSION_STR);
if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
pr_err("Illegal rssh hash type %d, defaulting to source port\n",
myri10ge_rss_hash);
myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
}
#ifdef CONFIG_MYRI10GE_DCA
dca_register_notify(&myri10ge_dca_notifier);
#endif
if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
myri10ge_max_slices = MYRI10GE_MAX_SLICES;
return pci_register_driver(&myri10ge_driver);
}
module_init(myri10ge_init_module);
static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
dca_unregister_notify(&myri10ge_dca_notifier);
#endif
pci_unregister_driver(&myri10ge_driver);
}
module_exit(myri10ge_cleanup_module);
|
linux-master
|
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
rx_desc = comm->rx_desc[i];
rx_skbinfo = comm->rx_skb_info[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
rx_desc[j].addr1 = rx_skbinfo[j].mapping;
rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are ready. */
rx_desc[j].cmd1 = RXD_OWN;
}
}
}
void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
{
u32 i;
if (!comm->tx_desc)
return;
for (i = 0; i < TX_DESC_NUM; i++) {
comm->tx_desc[i].cmd1 = 0;
wmb(); /* Clear TXD_OWN and then set other fields. */
comm->tx_desc[i].cmd2 = 0;
comm->tx_desc[i].addr1 = 0;
comm->tx_desc[i].addr2 = 0;
if (comm->tx_temp_skb_info[i].mapping) {
dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
comm->tx_temp_skb_info[i].mapping = 0;
}
if (comm->tx_temp_skb_info[i].skb) {
dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
comm->tx_temp_skb_info[i].skb = NULL;
}
}
}
void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
if (!comm->rx_skb_info[i])
continue;
rx_desc = comm->rx_desc[i];
rx_skbinfo = comm->rx_skb_info[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
rx_desc[j].cmd1 = 0;
wmb(); /* Clear RXD_OWN and then set other fields. */
rx_desc[j].cmd2 = 0;
rx_desc[j].addr1 = 0;
if (rx_skbinfo[j].skb) {
dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
comm->rx_desc_buff_size, DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_skbinfo[j].skb);
rx_skbinfo[j].skb = NULL;
rx_skbinfo[j].mapping = 0;
}
}
kfree(rx_skbinfo);
comm->rx_skb_info[i] = NULL;
}
}
void spl2sw_descs_clean(struct spl2sw_common *comm)
{
spl2sw_rx_descs_clean(comm);
spl2sw_tx_descs_clean(comm);
}
void spl2sw_descs_free(struct spl2sw_common *comm)
{
u32 i;
spl2sw_descs_clean(comm);
comm->tx_desc = NULL;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_desc[i] = NULL;
/* Free descriptor area */
if (comm->desc_base) {
dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
comm->desc_dma);
comm->desc_base = NULL;
comm->desc_dma = 0;
comm->desc_size = 0;
}
}
void spl2sw_tx_descs_init(struct spl2sw_common *comm)
{
memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
(TX_DESC_NUM + MAC_GUARD_DESC_NUM));
}
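/* Allocate an skb for every RX descriptor, map it for DMA and hand
 * the descriptor to the MAC; on any failure the queues filled so far
 * are torn down via spl2sw_rx_descs_clean().
 */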
int spl2sw_rx_descs_init(struct spl2sw_common *comm)
{
struct spl2sw_skb_info *rx_skbinfo;
struct spl2sw_mac_desc *rx_desc;
struct sk_buff *skb;
u32 mapping;
u32 i, j;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
GFP_KERNEL | GFP_DMA);
if (!comm->rx_skb_info[i])
goto mem_alloc_fail;
rx_skbinfo = comm->rx_skb_info[i];
rx_desc = comm->rx_desc[i];
for (j = 0; j < comm->rx_desc_num[i]; j++) {
skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
if (!skb)
goto mem_alloc_fail;
rx_skbinfo[j].skb = skb;
mapping = dma_map_single(&comm->pdev->dev, skb->data,
comm->rx_desc_buff_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, mapping))
goto mem_alloc_fail;
rx_skbinfo[j].mapping = mapping;
rx_desc[j].addr1 = mapping;
rx_desc[j].addr2 = 0;
rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are effective. */
rx_desc[j].cmd1 = RXD_OWN;
}
}
return 0;
mem_alloc_fail:
spl2sw_rx_descs_clean(comm);
return -ENOMEM;
}
int spl2sw_descs_alloc(struct spl2sw_common *comm)
{
s32 desc_size;
u32 i;
/* Alloc descriptor area */
desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);
comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
GFP_KERNEL);
if (!comm->desc_base)
return -ENOMEM;
comm->desc_size = desc_size;
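/* Layout of the coherent area: the TX descriptors (plus the guard
 * descriptors) come first, followed by the RX queues packed back to
 * back.
 */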
/* Setup Tx descriptor */
comm->tx_desc = comm->desc_base;
/* Setup Rx descriptor */
comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];
return 0;
}
int spl2sw_descs_init(struct spl2sw_common *comm)
{
u32 i, ret;
/* Initialize rx descriptor's data */
comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
comm->rx_desc[i] = NULL;
comm->rx_skb_info[i] = NULL;
comm->rx_pos[i] = 0;
}
comm->rx_desc_buff_size = MAC_RX_LEN_MAX;
/* Initialize tx descriptor's data */
comm->tx_done_pos = 0;
comm->tx_desc = NULL;
comm->tx_pos = 0;
comm->tx_desc_full = 0;
for (i = 0; i < TX_DESC_NUM; i++)
comm->tx_temp_skb_info[i].skb = NULL;
/* Allocate tx & rx descriptors. */
ret = spl2sw_descs_alloc(comm);
if (ret)
return ret;
spl2sw_tx_descs_init(comm);
return spl2sw_rx_descs_init(comm);
}
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_desc.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
#include "spl2sw_mac.h"
void spl2sw_mac_hw_stop(struct spl2sw_common *comm)
{
u32 reg;
if (comm->enable == 0) {
/* Mask and clear all interrupts. */
writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
writel(0xffffffff, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
/* Disable cpu 0 and cpu 1. */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
}
/* Disable LAN ports. */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg |= FIELD_PREP(MAC_DIS_PORT, ~comm->enable);
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
}
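/* Start the MAC: enable cpu port 0 with CRC padding and clear the
* disable bits of the LAN ports selected in comm->enable.
*/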
void spl2sw_mac_hw_start(struct spl2sw_common *comm)
{
u32 reg;
/* Enable cpu port 0 (6) & CRC padding (8) */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg &= ~MAC_DIS_SOC0_CPU;
reg |= MAC_EN_CRC_SOC0;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Enable port 0 & port 1 */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg &= FIELD_PREP(MAC_DIS_PORT, ~comm->enable) | ~MAC_DIS_PORT;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
}
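/* Add the port's MAC address to the switch address table and wait for
* the write command to complete.
*/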
int spl2sw_mac_addr_add(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
u32 reg;
int ret;
/* Write 6-octet MAC address. */
writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
comm->l2sw_reg_base + L2SW_W_MAC_15_0);
writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
(mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
comm->l2sw_reg_base + L2SW_W_MAC_47_16);
/* Set learn port = cpu_port, aging = 1 */
reg = MAC_W_CPU_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) |
FIELD_PREP(MAC_W_AGE, 1) | MAC_W_MAC_CMD;
writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
/* Wait for the command to complete. */
ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
if (ret) {
netdev_err(mac->ndev, "Failed to add address to table!\n");
return ret;
}
netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
(u32)FIELD_GET(MAC_W_MAC_47_16,
readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
(u32)FIELD_GET(MAC_W_MAC_15_0,
readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
return 0;
}
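/* Delete the port's MAC address from the switch address table and wait
* for the command to complete.
*/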
int spl2sw_mac_addr_del(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
u32 reg;
int ret;
/* Write 6-octet MAC address. */
writel((mac->mac_addr[0] << 0) + (mac->mac_addr[1] << 8),
comm->l2sw_reg_base + L2SW_W_MAC_15_0);
writel((mac->mac_addr[2] << 0) + (mac->mac_addr[3] << 8) +
(mac->mac_addr[4] << 16) + (mac->mac_addr[5] << 24),
comm->l2sw_reg_base + L2SW_W_MAC_47_16);
/* Set learn port = lan_port0 and aging = 0
* to wipe (age) out the entry.
*/
reg = MAC_W_LAN_PORT_0 | FIELD_PREP(MAC_W_VID, mac->vlan_id) | MAC_W_MAC_CMD;
writel(reg, comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
/* Wait for the command to complete. */
ret = read_poll_timeout(readl, reg, reg & MAC_W_MAC_DONE, 1, 200, true,
comm->l2sw_reg_base + L2SW_WT_MAC_AD0);
if (ret) {
netdev_err(mac->ndev, "Failed to delete address from table!\n");
return ret;
}
netdev_dbg(mac->ndev, "mac_ad0 = %08x, mac_ad = %08x%04x\n",
readl(comm->l2sw_reg_base + L2SW_WT_MAC_AD0),
(u32)FIELD_GET(MAC_W_MAC_47_16,
readl(comm->l2sw_reg_base + L2SW_W_MAC_47_16)),
(u32)FIELD_GET(MAC_W_MAC_15_0,
readl(comm->l2sw_reg_base + L2SW_W_MAC_15_0)));
return 0;
}
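/* Program the static switch configuration: descriptor ring base
* addresses, flow-control thresholds, learning/aging, RMII force mode,
* VLAN groups and the default interrupt mask.
*/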
void spl2sw_mac_hw_init(struct spl2sw_common *comm)
{
u32 reg;
/* Disable cpu port 0 and cpu port 1. */
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg |= MAC_DIS_SOC1_CPU | MAC_DIS_SOC0_CPU;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Set base addresses of TX and RX queues. */
writel(comm->desc_dma, comm->l2sw_reg_base + L2SW_TX_LBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * TX_DESC_NUM,
comm->l2sw_reg_base + L2SW_TX_HBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
MAC_GUARD_DESC_NUM), comm->l2sw_reg_base + L2SW_RX_HBASE_ADDR_0);
writel(comm->desc_dma + sizeof(struct spl2sw_mac_desc) * (TX_DESC_NUM +
MAC_GUARD_DESC_NUM + RX_QUEUE0_DESC_NUM),
comm->l2sw_reg_base + L2SW_RX_LBASE_ADDR_0);
/* Fc_rls_th=0x4a, Fc_set_th=0x3a, Drop_rls_th=0x2d, Drop_set_th=0x1d */
writel(0x4a3a2d1d, comm->l2sw_reg_base + L2SW_FL_CNTL_TH);
/* Cpu_rls_th=0x4a, Cpu_set_th=0x3a, Cpu_th=0x12, Port_th=0x12 */
writel(0x4a3a1212, comm->l2sw_reg_base + L2SW_CPU_FL_CNTL_TH);
/* mtcc_lmt=0xf, Pri_th_l=6, Pri_th_h=6, weigh_8x_en=1 */
writel(0xf6680000, comm->l2sw_reg_base + L2SW_PRI_FL_CNTL);
/* High-active LED */
reg = readl(comm->l2sw_reg_base + L2SW_LED_PORT0);
reg |= MAC_LED_ACT_HI;
writel(reg, comm->l2sw_reg_base + L2SW_LED_PORT0);
/* Disable aging of cpu port 0 & 1.
* Disable SA learning of cpu port 0 & 1.
* Enable UC and MC packets
*/
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
reg &= ~(MAC_EN_SOC1_AGING | MAC_EN_SOC0_AGING |
MAC_DIS_BC2CPU_P1 | MAC_DIS_BC2CPU_P0 |
MAC_DIS_MC2CPU_P1 | MAC_DIS_MC2CPU_P0);
reg |= MAC_DIS_LRN_SOC1 | MAC_DIS_LRN_SOC0;
writel(reg, comm->l2sw_reg_base + L2SW_CPU_CNTL);
/* Enable RMC2CPU for port 0 & 1
* Enable Flow control for port 0 & 1
* Enable Back pressure for port 0 & 1
*/
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL0);
reg &= ~(MAC_DIS_RMC2CPU_P1 | MAC_DIS_RMC2CPU_P0);
reg |= MAC_EN_FLOW_CTL_P1 | MAC_EN_FLOW_CTL_P0 |
MAC_EN_BACK_PRESS_P1 | MAC_EN_BACK_PRESS_P0;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL0);
/* Disable LAN port SA learning. */
reg = readl(comm->l2sw_reg_base + L2SW_PORT_CNTL1);
reg |= MAC_DIS_SA_LRN_P1 | MAC_DIS_SA_LRN_P0;
writel(reg, comm->l2sw_reg_base + L2SW_PORT_CNTL1);
/* Enable rmii force mode and
* set both external phy addresses to 31.
*/
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~(MAC_EXT_PHY1_ADDR | MAC_EXT_PHY0_ADDR);
reg |= FIELD_PREP(MAC_EXT_PHY1_ADDR, 31) | FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
reg |= MAC_FORCE_RMII_EN_1 | MAC_FORCE_RMII_EN_0;
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
/* Port 0: VLAN group 0
* Port 1: VLAN group 1
*/
reg = FIELD_PREP(MAC_P1_PVID, 1) | FIELD_PREP(MAC_P0_PVID, 0);
writel(reg, comm->l2sw_reg_base + L2SW_PVID_CONFIG0);
/* VLAN group 0: cpu0 (bit3) + port0 (bit0) = 1001 = 0x9
* VLAN group 1: cpu0 (bit3) + port1 (bit1) = 1010 = 0xa
*/
reg = FIELD_PREP(MAC_VLAN_MEMSET_1, 0xa) | FIELD_PREP(MAC_VLAN_MEMSET_0, 9);
writel(reg, comm->l2sw_reg_base + L2SW_VLAN_MEMSET_CONFIG0);
/* RMC forward: to_cpu (1)
* LED: 60 ms (1)
* BC storm prev: 31 BC (1)
*/
reg = readl(comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
reg &= ~(MAC_RMC_TB_FAULT_RULE | MAC_LED_FLASH_TIME | MAC_BC_STORM_PREV);
reg |= FIELD_PREP(MAC_RMC_TB_FAULT_RULE, 1) |
FIELD_PREP(MAC_LED_FLASH_TIME, 1) |
FIELD_PREP(MAC_BC_STORM_PREV, 1);
writel(reg, comm->l2sw_reg_base + L2SW_SW_GLB_CNTL);
writel(MAC_INT_MASK_DEF, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
}
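/* Map the netdev RX flags (promiscuous/allmulti/multicast) onto the
* per-port "disable MC to cpu" and "disable unknown UC to cpu" bits.
*/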
void spl2sw_mac_rx_mode_set(struct spl2sw_mac *mac)
{
struct spl2sw_common *comm = mac->comm;
struct net_device *ndev = mac->ndev;
u32 mask, reg, rx_mode;
netdev_dbg(ndev, "ndev->flags = %08x\n", ndev->flags);
mask = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
reg = readl(comm->l2sw_reg_base + L2SW_CPU_CNTL);
if (ndev->flags & IFF_PROMISC) {
/* Allow MC and unknown UC packets */
rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port) |
FIELD_PREP(MAC_DIS_UN2CPU, mac->lan_port);
} else if ((!netdev_mc_empty(ndev) && (ndev->flags & IFF_MULTICAST)) ||
(ndev->flags & IFF_ALLMULTI)) {
/* Allow MC packets */
rx_mode = FIELD_PREP(MAC_DIS_MC2CPU, mac->lan_port);
} else {
/* Disable MC and unknown UC packets */
rx_mode = 0;
}
writel((reg & (~mask)) | ((~rx_mode) & mask), comm->l2sw_reg_base + L2SW_CPU_CNTL);
netdev_dbg(ndev, "cpu_cntl = %08x\n", readl(comm->l2sw_reg_base + L2SW_CPU_CNTL));
}
void spl2sw_mac_init(struct spl2sw_common *comm)
{
u32 i;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_pos[i] = 0;
mb(); /* make sure settings are effective. */
spl2sw_mac_hw_init(comm);
}
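/* Soft-reset the MAC: stop it, flush the RX descriptors, reset the ring
* positions, then re-initialize and restart the hardware.
*/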
void spl2sw_mac_soft_reset(struct spl2sw_common *comm)
{
u32 i;
spl2sw_mac_hw_stop(comm);
spl2sw_rx_descs_flush(comm);
comm->tx_pos = 0;
comm->tx_done_pos = 0;
comm->tx_desc_full = 0;
for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
comm->rx_pos[i] = 0;
mb(); /* make sure settings are effective. */
spl2sw_mac_hw_init(comm);
spl2sw_mac_hw_start(comm);
}
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_mac.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/of_net.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/of.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_desc.h"
#include "spl2sw_mdio.h"
#include "spl2sw_phy.h"
#include "spl2sw_int.h"
#include "spl2sw_mac.h"
/* net device operations */
static int spl2sw_ethernet_open(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
u32 mask;
netdev_dbg(ndev, "Open port = %x\n", mac->lan_port);
comm->enable |= mac->lan_port;
spl2sw_mac_hw_start(comm);
/* Enable TX and RX interrupts */
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~(MAC_INT_TX | MAC_INT_RX);
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
phy_start(ndev->phydev);
netif_start_queue(ndev);
return 0;
}
static int spl2sw_ethernet_stop(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
netif_stop_queue(ndev);
comm->enable &= ~mac->lan_port;
phy_stop(ndev->phydev);
spl2sw_mac_hw_stop(comm);
return 0;
}
static netdev_tx_t spl2sw_ethernet_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
struct spl2sw_skb_info *skbinfo;
struct spl2sw_mac_desc *txdesc;
unsigned long flags;
u32 mapping;
u32 tx_pos;
u32 cmd1;
u32 cmd2;
if (unlikely(comm->tx_desc_full == 1)) {
/* No TX descriptors left. Wait for tx interrupt. */
netdev_dbg(ndev, "TX descriptor queue full when xmit!\n");
return NETDEV_TX_BUSY;
}
/* If skb size is shorter than ETH_ZLEN (60), pad it with 0. */
if (unlikely(skb->len < ETH_ZLEN)) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
skb_put(skb, ETH_ZLEN - skb->len);
}
mapping = dma_map_single(&comm->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, mapping)) {
ndev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock_irqsave(&comm->tx_lock, flags);
tx_pos = comm->tx_pos;
txdesc = &comm->tx_desc[tx_pos];
skbinfo = &comm->tx_temp_skb_info[tx_pos];
skbinfo->mapping = mapping;
skbinfo->len = skb->len;
skbinfo->skb = skb;
/* Set up a TX descriptor */
cmd1 = TXD_OWN | TXD_SOP | TXD_EOP | (mac->to_vlan << 12) |
(skb->len & TXD_PKT_LEN);
cmd2 = skb->len & TXD_BUF_LEN1;
if (tx_pos == (TX_DESC_NUM - 1))
cmd2 |= TXD_EOR;
txdesc->addr1 = skbinfo->mapping;
txdesc->cmd2 = cmd2;
wmb(); /* Set TXD_OWN after other fields are effective. */
txdesc->cmd1 = cmd1;
/* Move tx_pos to next position */
tx_pos = ((tx_pos + 1) == TX_DESC_NUM) ? 0 : tx_pos + 1;
if (unlikely(tx_pos == comm->tx_done_pos)) {
netif_stop_queue(ndev);
comm->tx_desc_full = 1;
}
comm->tx_pos = tx_pos;
wmb(); /* make sure settings are effective. */
/* Trigger mac to transmit */
writel(MAC_TRIG_L_SOC0, comm->l2sw_reg_base + L2SW_CPU_TX_TRIG);
spin_unlock_irqrestore(&comm->tx_lock, flags);
return NETDEV_TX_OK;
}
static void spl2sw_ethernet_set_rx_mode(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
spl2sw_mac_rx_mode_set(mac);
}
static int spl2sw_ethernet_set_mac_address(struct net_device *ndev, void *addr)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
int err;
err = eth_mac_addr(ndev, addr);
if (err)
return err;
/* Delete the old MAC address */
netdev_dbg(ndev, "Old Ethernet (MAC) address = %pM\n", mac->mac_addr);
if (is_valid_ether_addr(mac->mac_addr)) {
err = spl2sw_mac_addr_del(mac);
if (err)
return err;
}
/* Set the MAC address */
ether_addr_copy(mac->mac_addr, ndev->dev_addr);
return spl2sw_mac_addr_add(mac);
}
static void spl2sw_ethernet_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct spl2sw_common *comm = mac->comm;
unsigned long flags;
int i;
netdev_err(ndev, "TX timed out!\n");
ndev->stats.tx_errors++;
spin_lock_irqsave(&comm->tx_lock, flags);
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
netif_stop_queue(comm->ndev[i]);
spl2sw_mac_soft_reset(comm);
/* Accept TX packets again. */
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
netif_trans_update(comm->ndev[i]);
netif_wake_queue(comm->ndev[i]);
}
spin_unlock_irqrestore(&comm->tx_lock, flags);
}
static const struct net_device_ops netdev_ops = {
.ndo_open = spl2sw_ethernet_open,
.ndo_stop = spl2sw_ethernet_stop,
.ndo_start_xmit = spl2sw_ethernet_start_xmit,
.ndo_set_rx_mode = spl2sw_ethernet_set_rx_mode,
.ndo_set_mac_address = spl2sw_ethernet_set_mac_address,
.ndo_do_ioctl = phy_do_ioctl,
.ndo_tx_timeout = spl2sw_ethernet_tx_timeout,
};
static void spl2sw_check_mac_vendor_id_and_convert(u8 *mac_addr)
{
/* The byte order of the MAC address is reversed on some samples.
* Check the vendor ID and convert the byte order if it is wrong.
* OUI of Sunplus: fc:4b:bc
*/
if (mac_addr[5] == 0xfc && mac_addr[4] == 0x4b && mac_addr[3] == 0xbc &&
(mac_addr[0] != 0xfc || mac_addr[1] != 0x4b || mac_addr[2] != 0xbc)) {
swap(mac_addr[0], mac_addr[5]);
swap(mac_addr[1], mac_addr[4]);
swap(mac_addr[2], mac_addr[3]);
}
}
static int spl2sw_nvmem_get_mac_address(struct device *dev, struct device_node *np,
void *addrbuf)
{
struct nvmem_cell *cell;
ssize_t len;
u8 *mac;
/* Get nvmem cell of mac-address from dts. */
cell = of_nvmem_cell_get(np, "mac-address");
if (IS_ERR(cell))
return PTR_ERR(cell);
/* Read mac address from nvmem cell. */
mac = nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
if (IS_ERR(mac))
return PTR_ERR(mac);
if (len != ETH_ALEN) {
kfree(mac);
dev_info(dev, "Invalid length of mac address in nvmem!\n");
return -EINVAL;
}
/* The byte order is reversed on some samples.
* Convert the byte order here if it is wrong.
*/
spl2sw_check_mac_vendor_id_and_convert(mac);
/* Check if mac address is valid */
if (!is_valid_ether_addr(mac)) {
dev_info(dev, "Invalid mac address in nvmem (%pM)!\n", mac);
kfree(mac);
return -EINVAL;
}
ether_addr_copy(addrbuf, mac);
kfree(mac);
return 0;
}
static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr,
struct net_device **r_ndev)
{
struct net_device *ndev;
struct spl2sw_mac *mac;
int ret;
/* Allocate the devices, and also allocate spl2sw_mac,
* we can get it by netdev_priv().
*/
ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*mac));
if (!ndev) {
*r_ndev = NULL;
return -ENOMEM;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &netdev_ops;
mac = netdev_priv(ndev);
mac->ndev = ndev;
ether_addr_copy(mac->mac_addr, mac_addr);
eth_hw_addr_set(ndev, mac_addr);
dev_info(&pdev->dev, "Ethernet (MAC) address = %pM\n", mac_addr);
ret = register_netdev(ndev);
if (ret) {
dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n",
ndev->name);
*r_ndev = NULL;
return ret;
}
netdev_dbg(ndev, "Registered net device \"%s\" successfully.\n", ndev->name);
*r_ndev = ndev;
return 0;
}
static struct device_node *spl2sw_get_eth_child_node(struct device_node *ether_np, int id)
{
struct device_node *port_np;
int port_id;
for_each_child_of_node(ether_np, port_np) {
/* It is not a 'port' node, continue. */
if (strcmp(port_np->name, "port"))
continue;
if (of_property_read_u32(port_np, "reg", &port_id) < 0)
continue;
if (port_id == id)
return port_np;
}
/* Not found! */
return NULL;
}
static int spl2sw_probe(struct platform_device *pdev)
{
struct device_node *eth_ports_np;
struct device_node *port_np;
struct spl2sw_common *comm;
struct device_node *phy_np;
phy_interface_t phy_mode;
struct net_device *ndev;
struct spl2sw_mac *mac;
u8 mac_addr[ETH_ALEN];
int irq, i, ret;
if (platform_get_drvdata(pdev))
return -ENODEV;
/* Allocate memory for 'spl2sw_common' area. */
comm = devm_kzalloc(&pdev->dev, sizeof(*comm), GFP_KERNEL);
if (!comm)
return -ENOMEM;
comm->pdev = pdev;
platform_set_drvdata(pdev, comm);
spin_lock_init(&comm->tx_lock);
spin_lock_init(&comm->mdio_lock);
spin_lock_init(&comm->int_mask_lock);
/* Get memory resource 0 from dts. */
comm->l2sw_reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(comm->l2sw_reg_base))
return PTR_ERR(comm->l2sw_reg_base);
/* Get irq resource from dts. */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
irq = ret;
/* Get clock controller. */
comm->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(comm->clk)) {
dev_err_probe(&pdev->dev, PTR_ERR(comm->clk),
"Failed to retrieve clock controller!\n");
return PTR_ERR(comm->clk);
}
/* Get reset controller. */
comm->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(comm->rstc)) {
dev_err_probe(&pdev->dev, PTR_ERR(comm->rstc),
"Failed to retrieve reset controller!\n");
return PTR_ERR(comm->rstc);
}
/* Enable clock. */
ret = clk_prepare_enable(comm->clk);
if (ret)
return ret;
udelay(1);
/* Reset MAC */
reset_control_assert(comm->rstc);
udelay(1);
reset_control_deassert(comm->rstc);
usleep_range(1000, 2000);
/* Request irq. */
ret = devm_request_irq(&pdev->dev, irq, spl2sw_ethernet_interrupt, 0,
dev_name(&pdev->dev), comm);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq #%d!\n", irq);
goto out_clk_disable;
}
/* Initialize TX and RX descriptors. */
ret = spl2sw_descs_init(comm);
if (ret) {
dev_err(&pdev->dev, "Fail to initialize mac descriptors!\n");
spl2sw_descs_free(comm);
goto out_clk_disable;
}
/* Initialize MAC. */
spl2sw_mac_init(comm);
/* Initialize mdio bus */
ret = spl2sw_mdio_init(comm);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize mdio bus!\n");
goto out_clk_disable;
}
/* Get child node ethernet-ports. */
eth_ports_np = of_get_child_by_name(pdev->dev.of_node, "ethernet-ports");
if (!eth_ports_np) {
dev_err(&pdev->dev, "No ethernet-ports child node found!\n");
ret = -ENODEV;
goto out_free_mdio;
}
for (i = 0; i < MAX_NETDEV_NUM; i++) {
/* Get port@i of node ethernet-ports. */
port_np = spl2sw_get_eth_child_node(eth_ports_np, i);
if (!port_np)
continue;
/* Get phy-mode. */
if (of_get_phy_mode(port_np, &phy_mode)) {
dev_err(&pdev->dev, "Failed to get phy-mode property of port@%d!\n",
i);
continue;
}
/* Get phy-handle. */
phy_np = of_parse_phandle(port_np, "phy-handle", 0);
if (!phy_np) {
dev_err(&pdev->dev, "Failed to get phy-handle property of port@%d!\n",
i);
continue;
}
/* Get mac-address from nvmem. */
ret = spl2sw_nvmem_get_mac_address(&pdev->dev, port_np, mac_addr);
if (ret == -EPROBE_DEFER) {
goto out_unregister_dev;
} else if (ret) {
dev_info(&pdev->dev, "Generate a random mac address!\n");
eth_random_addr(mac_addr);
}
/* Initialize the net device. */
ret = spl2sw_init_netdev(pdev, mac_addr, &ndev);
if (ret)
goto out_unregister_dev;
ndev->irq = irq;
comm->ndev[i] = ndev;
mac = netdev_priv(ndev);
mac->phy_node = phy_np;
mac->phy_mode = phy_mode;
mac->comm = comm;
mac->lan_port = 0x1 << i; /* forward to port i */
mac->to_vlan = 0x1 << i; /* vlan group: i */
mac->vlan_id = i; /* vlan group: i */
/* Set MAC address */
ret = spl2sw_mac_addr_add(mac);
if (ret)
goto out_unregister_dev;
spl2sw_mac_rx_mode_set(mac);
}
/* Find first valid net device. */
for (i = 0; i < MAX_NETDEV_NUM; i++) {
if (comm->ndev[i])
break;
}
if (i >= MAX_NETDEV_NUM) {
dev_err(&pdev->dev, "No valid ethernet port!\n");
ret = -ENODEV;
goto out_free_mdio;
}
/* Save first valid net device */
ndev = comm->ndev[i];
ret = spl2sw_phy_connect(comm);
if (ret) {
netdev_err(ndev, "Failed to connect phy!\n");
goto out_unregister_dev;
}
/* Add and enable napi. */
netif_napi_add(ndev, &comm->rx_napi, spl2sw_rx_poll);
napi_enable(&comm->rx_napi);
netif_napi_add_tx(ndev, &comm->tx_napi, spl2sw_tx_poll);
napi_enable(&comm->tx_napi);
return 0;
out_unregister_dev:
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
unregister_netdev(comm->ndev[i]);
out_free_mdio:
spl2sw_mdio_remove(comm);
out_clk_disable:
clk_disable_unprepare(comm->clk);
return ret;
}
static int spl2sw_remove(struct platform_device *pdev)
{
struct spl2sw_common *comm;
int i;
comm = platform_get_drvdata(pdev);
spl2sw_phy_remove(comm);
/* Unregister and free net device. */
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
unregister_netdev(comm->ndev[i]);
comm->enable = 0;
spl2sw_mac_hw_stop(comm);
spl2sw_descs_free(comm);
/* Disable and delete napi. */
napi_disable(&comm->rx_napi);
netif_napi_del(&comm->rx_napi);
napi_disable(&comm->tx_napi);
netif_napi_del(&comm->tx_napi);
spl2sw_mdio_remove(comm);
clk_disable_unprepare(comm->clk);
return 0;
}
static const struct of_device_id spl2sw_of_match[] = {
{.compatible = "sunplus,sp7021-emac"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
.remove = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
},
};
module_platform_driver(spl2sw_driver);
MODULE_AUTHOR("Wells Lu <[email protected]>");
MODULE_DESCRIPTION("Sunplus Dual 10M/100M Ethernet driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_driver.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_phy.h"
static void spl2sw_mii_link_change(struct net_device *ndev)
{
struct spl2sw_mac *mac = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
struct spl2sw_common *comm = mac->comm;
u32 reg;
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
if (phydev->link) {
reg |= FIELD_PREP(MAC_FORCE_RMII_LINK, mac->lan_port);
if (phydev->speed == 100) {
reg |= FIELD_PREP(MAC_FORCE_RMII_SPD, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_SPD, ~mac->lan_port) |
~MAC_FORCE_RMII_SPD;
}
if (phydev->duplex) {
reg |= FIELD_PREP(MAC_FORCE_RMII_DPX, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_DPX, ~mac->lan_port) |
~MAC_FORCE_RMII_DPX;
}
if (phydev->pause) {
reg |= FIELD_PREP(MAC_FORCE_RMII_FC, mac->lan_port);
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_FC, ~mac->lan_port) |
~MAC_FORCE_RMII_FC;
}
} else {
reg &= FIELD_PREP(MAC_FORCE_RMII_LINK, ~mac->lan_port) |
~MAC_FORCE_RMII_LINK;
}
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
phy_print_status(phydev);
}
int spl2sw_phy_connect(struct spl2sw_common *comm)
{
struct phy_device *phydev;
struct net_device *ndev;
struct spl2sw_mac *mac;
int i;
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
ndev = comm->ndev[i];
mac = netdev_priv(ndev);
phydev = of_phy_connect(ndev, mac->phy_node, spl2sw_mii_link_change,
0, mac->phy_mode);
if (!phydev)
return -ENODEV;
phy_support_asym_pause(phydev);
phy_attached_info(phydev);
}
return 0;
}
void spl2sw_phy_remove(struct spl2sw_common *comm)
{
struct net_device *ndev;
int i;
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
ndev = comm->ndev[i];
if (ndev)
phy_disconnect(ndev->phydev);
}
}
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_phy.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/spinlock.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_int.h"
int spl2sw_rx_poll(struct napi_struct *napi, int budget)
{
struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, rx_napi);
struct spl2sw_mac_desc *desc, *h_desc;
struct net_device_stats *stats;
struct sk_buff *skb, *new_skb;
struct spl2sw_skb_info *sinfo;
int budget_left = budget;
unsigned long flags;
u32 rx_pos, pkg_len;
u32 num, rx_count;
s32 queue;
u32 mask;
int port;
u32 cmd;
u32 len;
/* Process high-priority queue and then low-priority queue. */
for (queue = 0; queue < RX_DESC_QUEUE_NUM; queue++) {
rx_pos = comm->rx_pos[queue];
rx_count = comm->rx_desc_num[queue];
for (num = 0; num < rx_count && budget_left; num++) {
sinfo = comm->rx_skb_info[queue] + rx_pos;
desc = comm->rx_desc[queue] + rx_pos;
cmd = desc->cmd1;
if (cmd & RXD_OWN)
break;
port = FIELD_GET(RXD_PKT_SP, cmd);
if (port < MAX_NETDEV_NUM && comm->ndev[port])
stats = &comm->ndev[port]->stats;
else
goto spl2sw_rx_poll_rec_err;
pkg_len = FIELD_GET(RXD_PKT_LEN, cmd);
if (unlikely((cmd & RXD_ERR_CODE) || pkg_len < ETH_ZLEN + 4)) {
stats->rx_length_errors++;
stats->rx_dropped++;
goto spl2sw_rx_poll_rec_err;
}
dma_unmap_single(&comm->pdev->dev, sinfo->mapping,
comm->rx_desc_buff_size, DMA_FROM_DEVICE);
skb = sinfo->skb;
skb_put(skb, pkg_len - 4); /* Minus FCS */
skb->ip_summed = CHECKSUM_NONE;
skb->protocol = eth_type_trans(skb, comm->ndev[port]);
len = skb->len;
netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += len;
/* Allocate a new skb for receiving. */
new_skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
if (unlikely(!new_skb)) {
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR : 0;
sinfo->skb = NULL;
sinfo->mapping = 0;
desc->addr1 = 0;
goto spl2sw_rx_poll_alloc_err;
}
sinfo->mapping = dma_map_single(&comm->pdev->dev, new_skb->data,
comm->rx_desc_buff_size,
DMA_FROM_DEVICE);
if (dma_mapping_error(&comm->pdev->dev, sinfo->mapping)) {
dev_kfree_skb_irq(new_skb);
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR : 0;
sinfo->skb = NULL;
sinfo->mapping = 0;
desc->addr1 = 0;
goto spl2sw_rx_poll_alloc_err;
}
sinfo->skb = new_skb;
desc->addr1 = sinfo->mapping;
spl2sw_rx_poll_rec_err:
desc->cmd2 = (rx_pos == comm->rx_desc_num[queue] - 1) ?
RXD_EOR | comm->rx_desc_buff_size :
comm->rx_desc_buff_size;
wmb(); /* Set RXD_OWN after other fields are effective. */
desc->cmd1 = RXD_OWN;
spl2sw_rx_poll_alloc_err:
/* Move rx_pos to next position */
rx_pos = ((rx_pos + 1) == comm->rx_desc_num[queue]) ? 0 : rx_pos + 1;
budget_left--;
/* If there are packets in high-priority queue,
* stop processing low-priority queue.
*/
if (queue == 1 && !(h_desc->cmd1 & RXD_OWN))
break;
}
comm->rx_pos[queue] = rx_pos;
/* Save pointer to last rx descriptor of high-priority queue. */
if (queue == 0)
h_desc = comm->rx_desc[queue] + rx_pos;
}
spin_lock_irqsave(&comm->int_mask_lock, flags);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_RX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock_irqrestore(&comm->int_mask_lock, flags);
napi_complete(napi);
return budget - budget_left;
}
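/* NAPI TX poll: reclaim completed TX descriptors, update statistics,
* wake stopped queues and re-enable TX interrupts.
*/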
int spl2sw_tx_poll(struct napi_struct *napi, int budget)
{
struct spl2sw_common *comm = container_of(napi, struct spl2sw_common, tx_napi);
struct spl2sw_skb_info *skbinfo;
struct net_device_stats *stats;
int budget_left = budget;
unsigned long flags;
u32 tx_done_pos;
u32 mask;
u32 cmd;
int i;
spin_lock(&comm->tx_lock);
tx_done_pos = comm->tx_done_pos;
while (((tx_done_pos != comm->tx_pos) || (comm->tx_desc_full == 1)) && budget_left) {
cmd = comm->tx_desc[tx_done_pos].cmd1;
if (cmd & TXD_OWN)
break;
skbinfo = &comm->tx_temp_skb_info[tx_done_pos];
if (unlikely(!skbinfo->skb))
goto spl2sw_tx_poll_next;
i = ffs(FIELD_GET(TXD_VLAN, cmd)) - 1;
if (i < MAX_NETDEV_NUM && comm->ndev[i])
stats = &comm->ndev[i]->stats;
else
goto spl2sw_tx_poll_unmap;
if (unlikely(cmd & (TXD_ERR_CODE))) {
stats->tx_errors++;
} else {
stats->tx_packets++;
stats->tx_bytes += skbinfo->len;
}
spl2sw_tx_poll_unmap:
dma_unmap_single(&comm->pdev->dev, skbinfo->mapping, skbinfo->len,
DMA_TO_DEVICE);
skbinfo->mapping = 0;
dev_kfree_skb_irq(skbinfo->skb);
skbinfo->skb = NULL;
spl2sw_tx_poll_next:
/* Move tx_done_pos to next position */
tx_done_pos = ((tx_done_pos + 1) == TX_DESC_NUM) ? 0 : tx_done_pos + 1;
if (comm->tx_desc_full == 1)
comm->tx_desc_full = 0;
budget_left--;
}
comm->tx_done_pos = tx_done_pos;
if (!comm->tx_desc_full)
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i])
if (netif_queue_stopped(comm->ndev[i]))
netif_wake_queue(comm->ndev[i]);
spin_unlock(&comm->tx_lock);
spin_lock_irqsave(&comm->int_mask_lock, flags);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock_irqrestore(&comm->int_mask_lock, flags);
napi_complete(napi);
return budget - budget_left;
}
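/* Interrupt handler: acknowledge the status bits, mask the RX/TX
* interrupts and defer the work to the NAPI handlers.
*/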
irqreturn_t spl2sw_ethernet_interrupt(int irq, void *dev_id)
{
struct spl2sw_common *comm = (struct spl2sw_common *)dev_id;
u32 status;
u32 mask;
int i;
status = readl(comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
if (unlikely(!status)) {
dev_dbg(&comm->pdev->dev, "Interrupt status is null!\n");
goto spl2sw_ethernet_int_out;
}
writel(status, comm->l2sw_reg_base + L2SW_SW_INT_STATUS_0);
if (status & MAC_INT_RX) {
/* Disable RX interrupts. */
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask |= MAC_INT_RX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
if (unlikely(status & MAC_INT_RX_DES_ERR)) {
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
comm->ndev[i]->stats.rx_fifo_errors++;
break;
}
dev_dbg(&comm->pdev->dev, "Illegal RX Descriptor!\n");
}
napi_schedule(&comm->rx_napi);
}
if (status & MAC_INT_TX) {
/* Disable TX interrupts. */
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask |= MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
if (unlikely(status & MAC_INT_TX_DES_ERR)) {
for (i = 0; i < MAX_NETDEV_NUM; i++)
if (comm->ndev[i]) {
comm->ndev[i]->stats.tx_fifo_errors++;
break;
}
dev_dbg(&comm->pdev->dev, "Illegal TX Descriptor Error\n");
spin_lock(&comm->int_mask_lock);
mask = readl(comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
mask &= ~MAC_INT_TX;
writel(mask, comm->l2sw_reg_base + L2SW_SW_INT_MASK_0);
spin_unlock(&comm->int_mask_lock);
} else {
napi_schedule(&comm->tx_napi);
}
}
spl2sw_ethernet_int_out:
return IRQ_HANDLED;
}
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_int.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/bitfield.h>
#include <linux/of_mdio.h>
#include "spl2sw_register.h"
#include "spl2sw_define.h"
#include "spl2sw_mdio.h"
#define SPL2SW_MDIO_READ_CMD 0x02
#define SPL2SW_MDIO_WRITE_CMD 0x01
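/* Issue a single MDIO command: point ext_phy0_addr at the target phy,
* write the command, poll for completion and restore the phy address.
* Returns the data read (the upper 16 bits of the status register) or
* a negative error code.
*/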
static int spl2sw_mdio_access(struct spl2sw_common *comm, u8 cmd, u8 addr, u8 regnum, u16 wdata)
{
u32 reg, reg2;
u32 val;
int ret;
/* Note that addr (of phy) should match either ext_phy0_addr
* or ext_phy1_addr, or mdio commands won't be sent out.
*/
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~MAC_EXT_PHY0_ADDR;
reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, addr);
reg2 = FIELD_PREP(MAC_CPU_PHY_WT_DATA, wdata) | FIELD_PREP(MAC_CPU_PHY_CMD, cmd) |
FIELD_PREP(MAC_CPU_PHY_REG_ADDR, regnum) | FIELD_PREP(MAC_CPU_PHY_ADDR, addr);
/* Set ext_phy0_addr and then issue mdio command.
* No interrupt is allowed in between.
*/
spin_lock_irq(&comm->mdio_lock);
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
writel(reg2, comm->l2sw_reg_base + L2SW_PHY_CNTL_REG0);
spin_unlock_irq(&comm->mdio_lock);
ret = read_poll_timeout(readl, val, val & cmd, 1, 1000, true,
comm->l2sw_reg_base + L2SW_PHY_CNTL_REG1);
/* Set ext_phy0_addr back to 31 to prevent the hardware
* auto-mdio function from sending mdio commands to the phy.
*/
reg = readl(comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
reg &= ~MAC_EXT_PHY0_ADDR;
reg |= FIELD_PREP(MAC_EXT_PHY0_ADDR, 31);
writel(reg, comm->l2sw_reg_base + L2SW_MAC_FORCE_MODE);
if (ret == 0)
return val >> 16;
else
return ret;
}
static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum)
{
struct spl2sw_common *comm = bus->priv;
return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0);
}
static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
struct spl2sw_common *comm = bus->priv;
int ret;
ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val);
if (ret < 0)
return ret;
return 0;
}
u32 spl2sw_mdio_init(struct spl2sw_common *comm)
{
struct device_node *mdio_np;
struct mii_bus *mii_bus;
int ret;
/* Get mdio child node. */
mdio_np = of_get_child_by_name(comm->pdev->dev.of_node, "mdio");
if (!mdio_np) {
dev_err(&comm->pdev->dev, "No mdio child node found!\n");
return -ENODEV;
}
/* Allocate and register mdio bus. */
mii_bus = devm_mdiobus_alloc(&comm->pdev->dev);
if (!mii_bus) {
ret = -ENOMEM;
goto out;
}
mii_bus->name = "sunplus_mii_bus";
mii_bus->parent = &comm->pdev->dev;
mii_bus->priv = comm;
mii_bus->read = spl2sw_mii_read;
mii_bus->write = spl2sw_mii_write;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&comm->pdev->dev));
ret = of_mdiobus_register(mii_bus, mdio_np);
if (ret) {
dev_err(&comm->pdev->dev, "Failed to register mdiobus!\n");
goto out;
}
comm->mii_bus = mii_bus;
out:
of_node_put(mdio_np);
return ret;
}
void spl2sw_mdio_remove(struct spl2sw_common *comm)
{
if (comm->mii_bus) {
mdiobus_unregister(comm->mii_bus);
comm->mii_bus = NULL;
}
}
|
linux-master
|
drivers/net/ethernet/sunplus/spl2sw_mdio.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2020-2021 NXP
*/
#include <net/devlink.h>
#include "ocelot.h"
/* The queue system tracks four resource consumptions:
* Resource 0: Memory tracked per source port
* Resource 1: Frame references tracked per source port
* Resource 2: Memory tracked per destination port
* Resource 3: Frame references tracked per destination port
*/
#define OCELOT_RESOURCE_SZ 256
#define OCELOT_NUM_RESOURCES 4
#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
/* For each resource type there are 4 types of watermarks:
* Q_RSRV: reservation per QoS class per port
* PRIO_SHR: sharing watermark per QoS class across all ports
* P_RSRV: reservation per port
* COL_SHR: sharing watermark per color (drop precedence) across all ports
*/
#define xxx_Q_RSRV_x 0
#define xxx_PRIO_SHR_x 216
#define xxx_P_RSRV_x 224
#define xxx_COL_SHR_x 254
/* Reservation Watermarks
* ----------------------
*
* For setting up the reserved areas, egress watermarks exist per port and per
* QoS class for both ingress and egress.
*/
/* Amount of packet buffer
* | per QoS class
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* BUF_Q_RSRV_E
*/
#define BUF_Q_RSRV_E(port, prio) \
(BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of packet buffer
* | for all port's traffic classes
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* BUF_P_RSRV_E
*/
#define BUF_P_RSRV_E(port) \
(BUF_xxxx_E + xxx_P_RSRV_x + (port))
/* Amount of packet buffer
* | per QoS class
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* BUF_Q_RSRV_I
*/
#define BUF_Q_RSRV_I(port, prio) \
(BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of packet buffer
* | for all port's traffic classes
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* BUF_P_RSRV_I
*/
#define BUF_P_RSRV_I(port) \
(BUF_xxxx_I + xxx_P_RSRV_x + (port))
/* Amount of frame references
* | per QoS class
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* REF_Q_RSRV_E
*/
#define REF_Q_RSRV_E(port, prio) \
(REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of frame references
* | for all port's traffic classes
* | | reserved
* | | | per egress port
* | | | |
* V V v v
* REF_P_RSRV_E
*/
#define REF_P_RSRV_E(port) \
(REF_xxxx_E + xxx_P_RSRV_x + (port))
/* Amount of frame references
* | per QoS class
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* REF_Q_RSRV_I
*/
#define REF_Q_RSRV_I(port, prio) \
(REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
/* Amount of frame references
* | for all port's traffic classes
* | | reserved
* | | | per ingress port
* | | | |
* V V v v
* REF_P_RSRV_I
*/
#define REF_P_RSRV_I(port) \
(REF_xxxx_I + xxx_P_RSRV_x + (port))
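/* Illustrative example (assuming OCELOT_NUM_TC == 8): BUF_Q_RSRV_E(2, 5)
* selects watermark index BUF_xxxx_E + xxx_Q_RSRV_x + 8 * 2 + 5 =
* 512 + 0 + 21 = 533 of the QSYS_RES_CFG register array.
*/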
/* Sharing Watermarks
* ------------------
*
* The shared memory area is shared between all ports.
*/
/* Amount of buffer
* | per QoS class
* | | from the shared memory area
* | | | for egress traffic
* | | | |
* V V v v
* BUF_PRIO_SHR_E
*/
#define BUF_PRIO_SHR_E(prio) \
(BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
/* Amount of buffer
* | per color (drop precedence level)
* | | from the shared memory area
* | | | for egress traffic
* | | | |
* V V v v
* BUF_COL_SHR_E
*/
#define BUF_COL_SHR_E(dp) \
(BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of buffer
* | per QoS class
* | | from the shared memory area
* | | | for ingress traffic
* | | | |
* V V v v
* BUF_PRIO_SHR_I
*/
#define BUF_PRIO_SHR_I(prio) \
(BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
/* Amount of buffer
* | per color (drop precedence level)
* | | from the shared memory area
* | | | for ingress traffic
* | | | |
* V V v v
* BUF_COL_SHR_I
*/
#define BUF_COL_SHR_I(dp) \
(BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of frame references
* | per QoS class
* | | from the shared area
* | | | for egress traffic
* | | | |
* V V v v
* REF_PRIO_SHR_E
*/
#define REF_PRIO_SHR_E(prio) \
(REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
/* Amount of frame references
* | per color (drop precedence level)
* | | from the shared area
* | | | for egress traffic
* | | | |
* V V v v
* REF_COL_SHR_E
*/
#define REF_COL_SHR_E(dp) \
(REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
/* Amount of frame references
* | per QoS class
* | | from the shared area
* | | | for ingress traffic
* | | | |
* V V v v
* REF_PRIO_SHR_I
*/
#define REF_PRIO_SHR_I(prio) \
(REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
/* Amount of frame references
* | per color (drop precedence level)
* | | from the shared area
* | | | for ingress traffic
* | | | |
* V V v v
* REF_COL_SHR_I
*/
#define REF_COL_SHR_I(dp) \
(REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
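/* Thin wrappers around the QSYS_RES_CFG / QSYS_RES_STAT register arrays
* that apply the SoC-specific watermark encode/decode callbacks.
*/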
static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
{
int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
return ocelot->ops->wm_dec(wm);
}
static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
{
u32 wm = ocelot->ops->wm_enc(val);
ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
}
static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
u32 *maxuse)
{
int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
return ocelot->ops->wm_stat(res_stat, inuse, maxuse);
}
/* The hardware comes out of reset with strange defaults: the sum of all
* reservations for frame memory is larger than the total buffer size.
* One has to wonder how the reservation watermarks can still guarantee
* anything under congestion.
* Bring some sense into the hardware by changing the defaults to disable all
* reservations and rely only on the sharing watermark for frames with drop
* precedence 0. The user can still explicitly request reservations per port
* and per port-tc through devlink-sb.
*/
static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
int port)
{
int prio;
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
}
ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
}
/* We want the sharing watermarks to consume all nonreserved resources, for
* efficient resource utilization (a single traffic flow should be able to use
* up the entire buffer space and frame resources as long as there's no
* interference).
* The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
* per color (drop precedence).
* The trouble with configuring these sharing watermarks is that:
* (1) There's a risk that we overcommit the resources if we configure
* (a) all 8 per-TC sharing watermarks to the max
* (b) all 2 per-color sharing watermarks to the max
* (2) There's a risk that we undercommit the resources if we configure
* (a) all 8 per-TC sharing watermarks to "max / 8"
* (b) all 2 per-color sharing watermarks to "max / 2"
* So for Linux, let's just disable the sharing watermarks per traffic class
* (setting them to 0 will make them always exceeded), and rely only on the
* sharing watermark for drop priority 0. So frames with drop priority set to 1
* by QoS classification or policing will still be allowed, but only as long as
* the port and port-TC reservations are not exceeded.
*/
static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
{
int prio;
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
}
}
static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
u32 *buf_rsrv_e)
{
int port, prio;
*buf_rsrv_i = 0;
*buf_rsrv_e = 0;
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
*buf_rsrv_i += ocelot_wm_read(ocelot,
BUF_Q_RSRV_I(port, prio));
*buf_rsrv_e += ocelot_wm_read(ocelot,
BUF_Q_RSRV_E(port, prio));
}
*buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
*buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
}
*buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
*buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
}
static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
u32 *ref_rsrv_e)
{
int port, prio;
*ref_rsrv_i = 0;
*ref_rsrv_e = 0;
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
*ref_rsrv_i += ocelot_wm_read(ocelot,
REF_Q_RSRV_I(port, prio));
*ref_rsrv_e += ocelot_wm_read(ocelot,
REF_Q_RSRV_E(port, prio));
}
*ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
*ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
}
}
/* Calculate all reservations, then set up the sharing watermark for DP=0 to
* consume the remaining resources up to the pool's configured size.
*/
static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
{
u32 buf_rsrv_i, buf_rsrv_e;
u32 ref_rsrv_i, ref_rsrv_e;
u32 buf_shr_i, buf_shr_e;
u32 ref_shr_i, ref_shr_e;
ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
buf_rsrv_i;
buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
buf_rsrv_e;
ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
ref_rsrv_i;
ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
ref_rsrv_e;
buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
}
/* Ensure that all reservations can be enforced */
static int ocelot_watermark_validate(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
u32 buf_rsrv_i, buf_rsrv_e;
u32 ref_rsrv_i, ref_rsrv_e;
ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress frame reservations exceed pool size");
return -ERANGE;
}
if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
NL_SET_ERR_MSG_MOD(extack,
"Egress frame reservations exceed pool size");
return -ERANGE;
}
if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress reference reservations exceed pool size");
return -ERANGE;
}
if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
NL_SET_ERR_MSG_MOD(extack,
"Egress reference reservations exceed pool size");
return -ERANGE;
}
return 0;
}
/* The hardware works like this:
*
* Frame forwarding decision taken
* |
* v
* +--------------------+--------------------+--------------------+
* | | | |
* v v v v
* Ingress memory Egress memory Ingress frame Egress frame
* check check reference check reference check
* | | | |
* v v v v
* BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
*(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
* (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
* (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v | v | v | v |
* BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
* (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
* | | | | | | | |
* |exceeded | |exceeded | |exceeded | |exceeded |
* v v v v v v v v
* fail success fail success fail success fail success
* | | | | | | | |
* v v v v v v v v
* +-----+----+ +-----+----+ +-----+----+ +-----+-----+
* | | | |
* +-------> OR <-------+ +-------> OR <-------+
* | |
* v v
* +----------------> AND <-----------------+
* |
* v
* FIFO drop / accept
*
* We are modeling each of the 4 parallel lookups as a devlink-sb pool.
* At least one (ingress or egress) memory pool and one (ingress or egress)
* frame reference pool need to have resources for frame acceptance to succeed.
*
* The following watermarks are controlled explicitly through devlink-sb:
* BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
* BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
* The following watermarks are controlled implicitly through devlink-sb:
* BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
* The following watermarks are unused and disabled:
* BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
*
* This function overrides the hardware defaults with more sane ones (no
* reservations by default, let sharing use all resources) and disables the
* unused watermarks.
*/
static void ocelot_watermark_init(struct ocelot *ocelot)
{
int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
int port;
ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
for (port = 0; port <= ocelot->num_phys_ports; port++)
ocelot_disable_reservation_watermarks(ocelot, port);
ocelot_disable_tc_sharing_watermarks(ocelot);
ocelot_setup_sharing_watermarks(ocelot);
}
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
*/
u16 ocelot_wm_enc(u16 value)
{
WARN_ON(value >= 16 * BIT(8));
if (value >= BIT(8))
return BIT(8) | (value / 16);
return value;
}
EXPORT_SYMBOL(ocelot_wm_enc);
u16 ocelot_wm_dec(u16 wm)
{
if (wm & BIT(8))
return (wm & GENMASK(7, 0)) * 16;
return wm;
}
EXPORT_SYMBOL(ocelot_wm_dec);
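/* Illustrative example: values above 255 are stored in units of 16 and
* therefore rounded down, e.g. ocelot_wm_enc(300) = BIT(8) | (300 / 16) =
* 0x112, which ocelot_wm_dec() expands back to 18 * 16 = 288.
*/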
void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
{
*inuse = (val & GENMASK(23, 12)) >> 12;
*maxuse = val & GENMASK(11, 0);
}
EXPORT_SYMBOL(ocelot_wm_stat);
/* Pool size and type are fixed up at runtime. Keeping this structure to
* look up the cell size multipliers.
*/
static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
[OCELOT_SB_BUF] = {
.cell_size = OCELOT_BUFFER_CELL_SZ,
.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
},
[OCELOT_SB_REF] = {
.cell_size = 1,
.threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
},
};
/* Returns the pool size configured through ocelot_sb_pool_set */
int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
if (sb_index >= OCELOT_SB_NUM)
return -ENODEV;
if (pool_index >= OCELOT_SB_POOL_NUM)
return -ENODEV;
*pool_info = ocelot_sb_pool[sb_index];
pool_info->size = ocelot->pool_size[sb_index][pool_index];
if (pool_index)
pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
else
pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_get);
/* The pool size received here configures the total amount of resources used on
* ingress (or on egress, depending upon the pool index). The pool size, minus
* the values for the port and port-tc reservations, is written into the
* COL_SHR(dp=0) sharing watermark.
*/
int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
u32 old_pool_size;
int err;
if (sb_index >= OCELOT_SB_NUM) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid sb, use 0 for buffers and 1 for frame references");
return -ENODEV;
}
if (pool_index >= OCELOT_SB_POOL_NUM) {
NL_SET_ERR_MSG_MOD(extack,
"Invalid pool, use 0 for ingress and 1 for egress");
return -ENODEV;
}
if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
NL_SET_ERR_MSG_MOD(extack,
"Only static threshold supported");
return -EOPNOTSUPP;
}
old_pool_size = ocelot->pool_size[sb_index][pool_index];
ocelot->pool_size[sb_index][pool_index] = size;
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot->pool_size[sb_index][pool_index] = old_pool_size;
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_pool_set);
/* This retrieves the configuration made with ocelot_sb_port_pool_set */
int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
return -ENODEV;
}
*p_threshold = ocelot_wm_read(ocelot, wm_index);
*p_threshold *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_get);
/* This configures the P_RSRV per-port reserved resource watermark */
int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 threshold, struct netlink_ext_ack *extack)
{
int wm_index, err;
u32 old_thr;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
return -ENODEV;
}
threshold /= ocelot_sb_pool[sb_index].cell_size;
old_thr = ocelot_wm_read(ocelot, wm_index);
ocelot_wm_write(ocelot, wm_index, threshold);
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot_wm_write(ocelot, wm_index, old_thr);
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_port_pool_set);
/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
return -ENODEV;
}
*p_threshold = ocelot_wm_read(ocelot, wm_index);
*p_threshold *= ocelot_sb_pool[sb_index].cell_size;
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
*p_pool_index = 0;
else
*p_pool_index = 1;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
/* This configures the Q_RSRV per-port-tc reserved resource watermark */
int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
int wm_index, err;
u32 old_thr;
/* Paranoid check? */
if (pool_index == OCELOT_SB_POOL_ING &&
pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
return -EINVAL;
if (pool_index == OCELOT_SB_POOL_EGR &&
pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
return -EINVAL;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
return -ENODEV;
}
threshold /= ocelot_sb_pool[sb_index].cell_size;
old_thr = ocelot_wm_read(ocelot, wm_index);
ocelot_wm_write(ocelot, wm_index, threshold);
err = ocelot_watermark_validate(ocelot, extack);
if (err) {
ocelot_wm_write(ocelot, wm_index, old_thr);
return err;
}
ocelot_setup_sharing_watermarks(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
/* The hardware does not support atomic snapshots, so we'll read out the
* occupancy registers individually and have this as just a stub.
*/
int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
{
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
/* The watermark occupancy registers are cleared upon read,
* so let's read them.
*/
int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
{
u32 inuse, maxuse;
int port, prio;
switch (sb_index) {
case OCELOT_SB_BUF:
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
&inuse, &maxuse);
ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
&inuse, &maxuse);
}
ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
&inuse, &maxuse);
ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
&inuse, &maxuse);
}
break;
case OCELOT_SB_REF:
for (port = 0; port <= ocelot->num_phys_ports; port++) {
for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
&inuse, &maxuse);
ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
&inuse, &maxuse);
}
ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
&inuse, &maxuse);
ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
&inuse, &maxuse);
}
break;
default:
return -ENODEV;
}
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 pool_index,
u32 *p_cur, u32 *p_max)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = BUF_P_RSRV_I(port);
else
wm_index = BUF_P_RSRV_E(port);
break;
case OCELOT_SB_REF:
if (pool_index == OCELOT_SB_POOL_ING)
wm_index = REF_P_RSRV_I(port);
else
wm_index = REF_P_RSRV_E(port);
break;
default:
return -ENODEV;
}
ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
*p_cur *= ocelot_sb_pool[sb_index].cell_size;
*p_max *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
int wm_index;
switch (sb_index) {
case OCELOT_SB_BUF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = BUF_Q_RSRV_I(port, tc_index);
else
wm_index = BUF_Q_RSRV_E(port, tc_index);
break;
case OCELOT_SB_REF:
if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
wm_index = REF_Q_RSRV_I(port, tc_index);
else
wm_index = REF_Q_RSRV_E(port, tc_index);
break;
default:
return -ENODEV;
}
ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
*p_cur *= ocelot_sb_pool[sb_index].cell_size;
*p_max *= ocelot_sb_pool[sb_index].cell_size;
return 0;
}
EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
int ocelot_devlink_sb_register(struct ocelot *ocelot)
{
int err;
err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
ocelot->packet_buffer_size, 1, 1,
OCELOT_NUM_TC, OCELOT_NUM_TC);
if (err)
return err;
err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
ocelot->num_frame_refs, 1, 1,
OCELOT_NUM_TC, OCELOT_NUM_TC);
if (err) {
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
return err;
}
ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
ocelot_watermark_init(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_devlink_sb_register);
void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
{
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
}
EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_devlink.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Hardware library for MAC Merge Layer and Frame Preemption on TSN-capable
* switches (VSC9959)
*
* Copyright 2022-2023 NXP
*/
#include <linux/ethtool.h>
#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_qsys.h>
#include "ocelot.h"
static const char *
mm_verify_state_to_string(enum ethtool_mm_verify_status state)
{
switch (state) {
case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
return "INITIAL";
case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
return "VERIFYING";
case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
return "SUCCEEDED";
case ETHTOOL_MM_VERIFY_STATUS_FAILED:
return "FAILED";
case ETHTOOL_MM_VERIFY_STATUS_DISABLED:
return "DISABLED";
default:
return "UNKNOWN";
}
}
static enum ethtool_mm_verify_status ocelot_mm_verify_status(u32 val)
{
switch (DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(val)) {
case 0:
return ETHTOOL_MM_VERIFY_STATUS_INITIAL;
case 1:
return ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
case 2:
return ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
case 3:
return ETHTOOL_MM_VERIFY_STATUS_FAILED;
case 4:
return ETHTOOL_MM_VERIFY_STATUS_DISABLED;
default:
return ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
}
}
void ocelot_port_update_active_preemptible_tcs(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm = &ocelot->mm[port];
u32 val = 0;
lockdep_assert_held(&ocelot->fwd_domain_lock);
/* Only commit preemptible TCs when MAC Merge is active.
* On NXP LS1028A, when using QSGMII, the port hangs if transmitting
* preemptible frames at any other link speed than gigabit, so avoid
* preemption at lower speeds in this PHY mode.
*/
if ((ocelot_port->phy_mode != PHY_INTERFACE_MODE_QSGMII ||
ocelot_port->speed == SPEED_1000) && mm->tx_active)
val = mm->preemptible_tcs;
/* Cut through switching doesn't work for preemptible priorities,
* so first make sure it is disabled. Also, changing the preemptible
* TCs affects the oversized frame dropping logic, so that needs to be
* re-triggered. And since tas_guard_bands_update() also implicitly
* calls cut_through_fwd(), we don't need to explicitly call it.
*/
mm->active_preemptible_tcs = val;
ocelot->ops->tas_guard_bands_update(ocelot, port);
dev_dbg(ocelot->dev,
"port %d %s/%s, MM TX %s, preemptible TCs 0x%x, active 0x%x\n",
port, phy_modes(ocelot_port->phy_mode),
phy_speed_to_str(ocelot_port->speed),
mm->tx_active ? "active" : "inactive", mm->preemptible_tcs,
mm->active_preemptible_tcs);
ocelot_rmw_rix(ocelot, QSYS_PREEMPTION_CFG_P_QUEUES(val),
QSYS_PREEMPTION_CFG_P_QUEUES_M,
QSYS_PREEMPTION_CFG, port);
}
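/* Quick reference for the decision above (derived from the code, not
 * normative):
 *
 *   mm->tx_active  phy_mode    speed        committed P_QUEUES
 *   false          any         any          0 (no preemptible TCs)
 *   true           non-QSGMII  any          mm->preemptible_tcs
 *   true           QSGMII      1000 Mbps    mm->preemptible_tcs
 *   true           QSGMII      10/100 Mbps  0 (avoids the QSGMII hang
 *                                           noted in the comment above)
 */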
void ocelot_port_change_fp(struct ocelot *ocelot, int port,
unsigned long preemptible_tcs)
{
struct ocelot_mm_state *mm = &ocelot->mm[port];
lockdep_assert_held(&ocelot->fwd_domain_lock);
if (mm->preemptible_tcs == preemptible_tcs)
return;
mm->preemptible_tcs = preemptible_tcs;
ocelot_port_update_active_preemptible_tcs(ocelot, port);
}
static void ocelot_mm_update_port_status(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm = &ocelot->mm[port];
enum ethtool_mm_verify_status verify_status;
u32 val, ack = 0;
if (!mm->tx_enabled)
return;
val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
verify_status = ocelot_mm_verify_status(val);
if (mm->verify_status != verify_status) {
dev_dbg(ocelot->dev,
"Port %d MAC Merge verification state %s\n",
port, mm_verify_state_to_string(verify_status));
mm->verify_status = verify_status;
}
if (val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY) {
mm->tx_active = !!(val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS);
dev_dbg(ocelot->dev, "Port %d TX preemption %s\n",
port, mm->tx_active ? "active" : "inactive");
ocelot_port_update_active_preemptible_tcs(ocelot, port);
ack |= DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY;
}
if (val & DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY) {
dev_err(ocelot->dev,
"Unexpected P-frame received on port %d while verification was unsuccessful or not yet verified\n",
port);
ack |= DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY;
}
if (val & DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY) {
dev_err(ocelot->dev,
"Unexpected P-frame requested to be transmitted on port %d while verification was unsuccessful or not yet verified, or MM_TX_ENA=0\n",
port);
ack |= DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY;
}
if (ack)
ocelot_port_writel(ocelot_port, ack, DEV_MM_STATUS);
}
void ocelot_mm_irq(struct ocelot *ocelot)
{
int port;
mutex_lock(&ocelot->fwd_domain_lock);
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_mm_update_port_status(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_mm_irq);
int ocelot_port_set_mm(struct ocelot *ocelot, int port,
struct ethtool_mm_cfg *cfg,
struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 mm_enable = 0, verify_disable = 0, add_frag_size;
struct ocelot_mm_state *mm;
int err;
if (!ocelot->mm_supported)
return -EOPNOTSUPP;
mm = &ocelot->mm[port];
err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
&add_frag_size, extack);
if (err)
return err;
if (cfg->pmac_enabled)
mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA;
if (cfg->tx_enabled)
mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA;
if (!cfg->verify_enabled)
verify_disable = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port_rmwl(ocelot_port, mm_enable,
DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA |
DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA,
DEV_MM_ENABLE_CONFIG);
ocelot_port_rmwl(ocelot_port, verify_disable |
DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(cfg->verify_time),
DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS |
DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M,
DEV_MM_VERIF_CONFIG);
ocelot_rmw_rix(ocelot,
QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(add_frag_size),
QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M,
QSYS_PREEMPTION_CFG,
port);
/* The switch will emit an IRQ when TX is disabled, to notify that it
* has become inactive. We optimize ocelot_mm_update_port_status() to
* not bother processing MM IRQs at all for ports with TX disabled,
* but we need to ACK this IRQ now, while mm->tx_enabled is still set,
* otherwise we get an IRQ storm.
*/
if (mm->tx_enabled && !cfg->tx_enabled) {
ocelot_mm_update_port_status(ocelot, port);
WARN_ON(mm->tx_active);
}
mm->tx_enabled = cfg->tx_enabled;
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_set_mm);
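/* Usage sketch (assumption, not taken from this file): a driver exposing
 * this through ethtool_ops::set_mm lets user space configure frame
 * preemption with something along the lines of
 *
 *   ethtool --set-mm swp0 tx-enabled on verify-enabled on tx-min-frag-size 60
 *
 * which would arrive here as cfg->tx_enabled = true,
 * cfg->verify_enabled = true and cfg->tx_min_frag_size = 60. The exact
 * ethtool syntax and interface name are assumptions.
 */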
int ocelot_port_get_mm(struct ocelot *ocelot, int port,
struct ethtool_mm_state *state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_mm_state *mm;
u32 val, add_frag_size;
if (!ocelot->mm_supported)
return -EOPNOTSUPP;
mm = &ocelot->mm[port];
mutex_lock(&ocelot->fwd_domain_lock);
val = ocelot_port_readl(ocelot_port, DEV_MM_ENABLE_CONFIG);
state->pmac_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA);
state->tx_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA);
val = ocelot_port_readl(ocelot_port, DEV_MM_VERIF_CONFIG);
state->verify_enabled = !(val & DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS);
state->verify_time = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(val);
state->max_verify_time = 128;
val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
state->rx_min_frag_size = ETH_ZLEN;
ocelot_mm_update_port_status(ocelot, port);
state->verify_status = mm->verify_status;
state->tx_active = mm->tx_active;
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_get_mm);
int ocelot_mm_init(struct ocelot *ocelot)
{
struct ocelot_port *ocelot_port;
struct ocelot_mm_state *mm;
int port;
if (!ocelot->mm_supported)
return 0;
ocelot->mm = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(*ocelot->mm), GFP_KERNEL);
if (!ocelot->mm)
return -ENOMEM;
for (port = 0; port < ocelot->num_phys_ports; port++) {
u32 val;
mm = &ocelot->mm[port];
ocelot_port = ocelot->ports[port];
/* Update initial status variable for the
* verification state machine
*/
val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
mm->verify_status = ocelot_mm_verify_status(val);
}
return 0;
}
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_mm.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot PTP clock driver
*
* Copyright (c) 2017 Microsemi Corporation
* Copyright 2020 NXP
*/
#include <linux/time64.h>
#include <linux/dsa/ocelot.h>
#include <linux/ptp_classify.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot.h"
int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
unsigned long flags;
time64_t s;
u32 val;
s64 ns;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
s <<= 32;
s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
/* Deal with negative values */
if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
s--;
ns &= 0xf;
ns += 999999984;
}
set_normalized_timespec64(ts, s, ns);
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_gettime64);
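/* Worked example of the negative-nanoseconds handling above: the hardware
 * encodes a small negative nanosecond part as 0x3ffffff0..0x3fffffff, i.e.
 * -16..-1 ns. A raw reading of s = 100, ns = 0x3ffffff1 therefore becomes
 * s = 99, ns = (0x3ffffff1 & 0xf) + 999999984 = 999999985, which is the
 * normalized form of 100 s - 15 ns.
 */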
int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
unsigned long flags;
u32 val;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
TOD_ACC_PIN);
ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
TOD_ACC_PIN);
ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
if (ocelot->ops->tas_clock_adjust)
ocelot->ops->tas_clock_adjust(ocelot);
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_settime64);
int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
struct ocelot *ocelot = container_of(ptp, struct ocelot,
ptp_info);
unsigned long flags;
u32 val;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK |
PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
if (ocelot->ops->tas_clock_adjust)
ocelot->ops->tas_clock_adjust(ocelot);
} else {
/* Fall back to ocelot_ptp_settime64(), which is not exact. */
struct timespec64 ts;
u64 now;
ocelot_ptp_gettime64(ptp, &ts);
now = ktime_to_ns(timespec64_to_ktime(ts));
ts = ns_to_timespec64(now + delta);
ocelot_ptp_settime64(ptp, &ts);
}
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_adjtime);
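/* Worked example for the branch above: adjtime(+300 ms) satisfies
 * |delta| < NSEC_PER_SEC / 2, so it is programmed as a one-shot
 * PTP_PIN_ACTION_DELTA of 300000000 ns. adjtime(+2 s) does not, so the
 * driver falls back to gettime64() followed by settime64(), which is not
 * atomic and therefore slightly less exact.
 */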
int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
u32 unit = 0, direction = 0;
unsigned long flags;
u64 adj = 0;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
if (!scaled_ppm)
goto disable_adj;
if (scaled_ppm < 0) {
direction = PTP_CFG_CLK_ADJ_CFG_DIR;
scaled_ppm = -scaled_ppm;
}
adj = PSEC_PER_SEC << 16;
do_div(adj, scaled_ppm);
do_div(adj, 1000);
/* If the adjustment value is too large, use ns instead */
if (adj >= (1L << 30)) {
unit = PTP_CFG_CLK_ADJ_FREQ_NS;
do_div(adj, 1000);
}
/* Still too big */
if (adj >= (1L << 30))
goto disable_adj;
ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
PTP_CLK_CFG_ADJ_CFG);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
return 0;
disable_adj:
ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_adjfine);
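/* Worked example of the scaled_ppm arithmetic above (derived from the code):
 * scaled_ppm is ppm shifted left by 16, so 65536 means 1 ppm. Then
 *   adj = (PSEC_PER_SEC << 16) / 65536 / 1000 = 10^12 / 1000 = 10^9,
 * which is below 1 << 30 and is programmed directly in the default unit.
 * For 0.5 ppm (scaled_ppm = 32768) the first result is 2 * 10^9, so the
 * code switches to PTP_CFG_CLK_ADJ_FREQ_NS and programs 2 * 10^6 instead.
 */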
int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
switch (func) {
case PTP_PF_NONE:
case PTP_PF_PEROUT:
break;
case PTP_PF_EXTTS:
case PTP_PF_PHYSYNC:
return -1;
}
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_verify);
int ocelot_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
struct timespec64 ts_phase, ts_period;
enum ocelot_ptp_pins ptp_pin;
unsigned long flags;
bool pps = false;
int pin = -1;
s64 wf_high;
s64 wf_low;
u32 val;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
/* Reject requests with unsupported flags */
if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
PTP_PEROUT_PHASE))
return -EOPNOTSUPP;
pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
if (pin == 0)
ptp_pin = PTP_PIN_0;
else if (pin == 1)
ptp_pin = PTP_PIN_1;
else if (pin == 2)
ptp_pin = PTP_PIN_2;
else if (pin == 3)
ptp_pin = PTP_PIN_3;
else
return -EBUSY;
ts_period.tv_sec = rq->perout.period.sec;
ts_period.tv_nsec = rq->perout.period.nsec;
if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
pps = true;
/* Handle turning off */
if (!on) {
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
break;
}
if (rq->perout.flags & PTP_PEROUT_PHASE) {
ts_phase.tv_sec = rq->perout.phase.sec;
ts_phase.tv_nsec = rq->perout.phase.nsec;
} else {
/* Compatibility */
ts_phase.tv_sec = rq->perout.start.sec;
ts_phase.tv_nsec = rq->perout.start.nsec;
}
if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
dev_warn(ocelot->dev,
"Absolute start time not supported!\n");
dev_warn(ocelot->dev,
"Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
return -EINVAL;
}
/* Calculate waveform high and low times */
if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
struct timespec64 ts_on;
ts_on.tv_sec = rq->perout.on.sec;
ts_on.tv_nsec = rq->perout.on.nsec;
wf_high = timespec64_to_ns(&ts_on);
} else {
if (pps) {
wf_high = 1000;
} else {
wf_high = timespec64_to_ns(&ts_period);
wf_high = div_s64(wf_high, 2);
}
}
wf_low = timespec64_to_ns(&ts_period);
wf_low -= wf_high;
/* Handle PPS request */
if (pps) {
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
ocelot_write_rix(ocelot, ts_phase.tv_nsec,
PTP_PIN_WF_LOW_PERIOD, ptp_pin);
ocelot_write_rix(ocelot, wf_high,
PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
val |= PTP_PIN_CFG_SYNC;
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
break;
}
/* Handle periodic clock */
if (wf_high > 0x3fffffff || wf_high <= 0x6)
return -EINVAL;
if (wf_low > 0x3fffffff || wf_low <= 0x6)
return -EINVAL;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
ocelot_write_rix(ocelot, wf_low, PTP_PIN_WF_LOW_PERIOD,
ptp_pin);
ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD,
ptp_pin);
val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
EXPORT_SYMBOL(ocelot_ptp_enable);
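/* Worked examples for the PEROUT path above (derived from the code):
 * - period = 1 s, no duty cycle: pps = true, wf_high = 1000 and the pin is
 *   programmed with PTP_PIN_CFG_SYNC, so the pulse is aligned to the second
 *   boundary plus any requested nsec phase.
 * - period = 10 ms, no duty cycle: wf_high = wf_low = 5000000 ns, both
 *   within the allowed (0x6, 0x3fffffff] range, so a free-running clock
 *   waveform is generated.
 */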
static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
*(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
}
static void
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
static void
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_add(ocelot, port, l2_cookie, true,
ocelot_populate_l2_ptp_trap_key);
}
static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, l2_cookie);
}
static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
ocelot_populate_ipv4_ptp_event_trap_key);
if (err)
return err;
err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
ocelot_populate_ipv4_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
return err;
}
static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
return err;
}
static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
ocelot_populate_ipv6_ptp_event_trap_key);
if (err)
return err;
err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
ocelot_populate_ipv6_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
return err;
}
static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
return err;
}
static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
bool l2, bool l4)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
ocelot_port->trap_proto &= ~(OCELOT_PROTO_PTP_L2 |
OCELOT_PROTO_PTP_L4);
if (l2)
err = ocelot_l2_ptp_trap_add(ocelot, port);
else
err = ocelot_l2_ptp_trap_del(ocelot, port);
if (err)
return err;
if (l4) {
err = ocelot_ipv4_ptp_trap_add(ocelot, port);
if (err)
goto err_ipv4;
err = ocelot_ipv6_ptp_trap_add(ocelot, port);
if (err)
goto err_ipv6;
} else {
err = ocelot_ipv4_ptp_trap_del(ocelot, port);
err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
}
if (err)
return err;
if (l2)
ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L2;
if (l4)
ocelot_port->trap_proto |= OCELOT_PROTO_PTP_L4;
return 0;
err_ipv6:
ocelot_ipv4_ptp_trap_del(ocelot, port);
err_ipv4:
if (l2)
ocelot_l2_ptp_trap_del(ocelot, port);
return err;
}
static int ocelot_traps_to_ptp_rx_filter(unsigned int proto)
{
if ((proto & OCELOT_PROTO_PTP_L2) && (proto & OCELOT_PROTO_PTP_L4))
return HWTSTAMP_FILTER_PTP_V2_EVENT;
else if (proto & OCELOT_PROTO_PTP_L2)
return HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else if (proto & OCELOT_PROTO_PTP_L4)
return HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
return HWTSTAMP_FILTER_NONE;
}
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct hwtstamp_config cfg = {};
switch (ocelot_port->ptp_cmd) {
case IFH_REW_OP_TWO_STEP_PTP:
cfg.tx_type = HWTSTAMP_TX_ON;
break;
case IFH_REW_OP_ORIGIN_PTP:
cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
break;
default:
cfg.tx_type = HWTSTAMP_TX_OFF;
break;
}
cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_get);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
bool l2 = false, l4 = false;
struct hwtstamp_config cfg;
int err;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
/* Tx type sanity check */
switch (cfg.tx_type) {
case HWTSTAMP_TX_ON:
ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
/* IFH_REW_OP_ONE_STEP_PTP updates the correctionField; here we need
* the origin time to be updated instead, hence IFH_REW_OP_ORIGIN_PTP.
*/
ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
break;
case HWTSTAMP_TX_OFF:
ocelot_port->ptp_cmd = 0;
break;
default:
return -ERANGE;
}
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
l2 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
l2 = true;
l4 = true;
break;
default:
return -ERANGE;
}
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
if (err)
return err;
cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto);
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(ocelot_hwstamp_set);
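/* Usage sketch (assumption, not taken from this file): this handler is
 * reached through the standard SIOCSHWTSTAMP ioctl, roughly:
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = {};
 *
 *   strncpy(ifr.ifr_name, "swp0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * which selects two-step TX timestamping and installs both the L2 and the
 * IPv4/IPv6 PTP traps. The interface name and fd are placeholders.
 */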
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info)
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
if (info->phc_index == -1) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0;
}
EXPORT_SYMBOL(ocelot_get_ts_info);
static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
struct sk_buff *clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
unsigned long flags;
spin_lock_irqsave(&ocelot->ts_id_lock, flags);
if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID ||
ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) {
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
return -EBUSY;
}
skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
/* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */
OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id;
ocelot_port->ts_id++;
if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID)
ocelot_port->ts_id = 0;
ocelot_port->ptp_skbs_in_flight++;
ocelot->ptp_skbs_in_flight++;
skb_queue_tail(&ocelot_port->tx_skbs, clone);
spin_unlock_irqrestore(&ocelot->ts_id_lock, flags);
return 0;
}
static bool ocelot_ptp_is_onestep_sync(struct sk_buff *skb,
unsigned int ptp_class)
{
struct ptp_header *hdr;
u8 msgtype, twostep;
hdr = ptp_parse_header(skb, ptp_class);
if (!hdr)
return false;
msgtype = ptp_get_msgtype(hdr, ptp_class);
twostep = hdr->flag_field[0] & 0x2;
if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0)
return true;
return false;
}
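/* Note (added for clarity): flag_field[0] bit 1 is the IEEE 1588
 * twoStepFlag, so the helper above returns true only for Sync messages whose
 * sender has not set twoStepFlag, i.e. frames that genuinely need one-step
 * treatment.
 */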
int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port,
struct sk_buff *skb,
struct sk_buff **clone)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 ptp_cmd = ocelot_port->ptp_cmd;
unsigned int ptp_class;
int err;
/* Don't do anything if PTP timestamping is not enabled */
if (!ptp_cmd)
return 0;
ptp_class = ptp_classify_raw(skb);
if (ptp_class == PTP_CLASS_NONE)
return -EINVAL;
/* Store ptp_cmd in OCELOT_SKB_CB(skb)->ptp_cmd */
if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
if (ocelot_ptp_is_onestep_sync(skb, ptp_class)) {
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
return 0;
}
/* Fall back to two-step timestamping */
ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
}
if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
*clone = skb_clone_sk(skb);
if (!(*clone))
return -ENOMEM;
err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone);
if (err)
return err;
OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd;
OCELOT_SKB_CB(*clone)->ptp_class = ptp_class;
}
return 0;
}
EXPORT_SYMBOL(ocelot_port_txtstamp_request);
static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
struct timespec64 *ts)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
/* Read current PTP time to get seconds */
val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
/* Read packet HW timestamp from FIFO */
val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
/* Sec has incremented since the ts was registered */
if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
ts->tv_sec--;
spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
}
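/* Worked example of the 1-bit seconds correction above: the TX timestamp
 * FIFO stores only the nanoseconds plus the LSB of the seconds counter at
 * the time of stamping. If the current time reads tv_sec = 101 but the
 * stored LSB is 0, the seconds counter rolled over between stamping and
 * readout, so tv_sec is decremented back to 100 before the timestamp is
 * delivered.
 */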
static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid)
{
struct ptp_header *hdr;
hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class);
if (WARN_ON(!hdr))
return false;
return seqid == ntohs(hdr->sequence_id);
}
void ocelot_get_txtstamp(struct ocelot *ocelot)
{
int budget = OCELOT_PTP_QUEUE_SZ;
while (budget--) {
struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
struct skb_shared_hwtstamps shhwtstamps;
u32 val, id, seqid, txport;
struct ocelot_port *port;
struct timespec64 ts;
unsigned long flags;
val = ocelot_read(ocelot, SYS_PTP_STATUS);
/* Check if a timestamp can be retrieved */
if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
break;
WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
/* Retrieve the ts ID and Tx port */
id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val);
port = ocelot->ports[txport];
spin_lock(&ocelot->ts_id_lock);
port->ptp_skbs_in_flight--;
ocelot->ptp_skbs_in_flight--;
spin_unlock(&ocelot->ts_id_lock);
/* Retrieve its associated skb */
try_again:
spin_lock_irqsave(&port->tx_skbs.lock, flags);
skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) {
if (OCELOT_SKB_CB(skb)->ts_id != id)
continue;
__skb_unlink(skb, &port->tx_skbs);
skb_match = skb;
break;
}
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
if (WARN_ON(!skb_match))
continue;
if (!ocelot_validate_ptp_skb(skb_match, seqid)) {
dev_err_ratelimited(ocelot->dev,
"port %d received stale TX timestamp for seqid %d, discarding\n",
txport, seqid);
dev_kfree_skb_any(skb);
goto try_again;
}
/* Get the h/w timestamp */
ocelot_get_hwtimestamp(ocelot, &ts);
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_complete_tx_timestamp(skb_match, &shhwtstamps);
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
int ocelot_init_timestamp(struct ocelot *ocelot,
const struct ptp_clock_info *info)
{
struct ptp_clock *ptp_clock;
int i;
ocelot->ptp_info = *info;
for (i = 0; i < OCELOT_PTP_PINS_NUM; i++) {
struct ptp_pin_desc *p = &ocelot->ptp_pins[i];
snprintf(p->name, sizeof(p->name), "switch_1588_dat%d", i);
p->index = i;
p->func = PTP_PF_NONE;
}
ocelot->ptp_info.pin_config = &ocelot->ptp_pins[0];
ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
if (IS_ERR(ptp_clock))
return PTR_ERR(ptp_clock);
/* Check if PHC support is missing at the configuration level */
if (!ptp_clock)
return 0;
ocelot->ptp_clock = ptp_clock;
ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
return 0;
}
EXPORT_SYMBOL(ocelot_init_timestamp);
int ocelot_deinit_timestamp(struct ocelot *ocelot)
{
if (ocelot->ptp_clock)
ptp_clock_unregister(ocelot->ptp_clock);
return 0;
}
EXPORT_SYMBOL(ocelot_deinit_timestamp);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_ptp.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017, 2019 Microsemi Corporation
* Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
#include <linux/mrp_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include <uapi/linux/mrp_bridge.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
static const u8 mrp_test_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x01 };
static const u8 mrp_control_dmac[] = { 0x01, 0x15, 0x4e, 0x00, 0x00, 0x02 };
static int ocelot_mrp_find_partner_port(struct ocelot *ocelot,
struct ocelot_port *p)
{
int i;
for (i = 0; i < ocelot->num_phys_ports; ++i) {
struct ocelot_port *ocelot_port = ocelot->ports[i];
if (!ocelot_port || p == ocelot_port)
continue;
if (ocelot_port->mrp_ring_id == p->mrp_ring_id)
return i;
}
return -1;
}
static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int id)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *filter;
block_vcap_is2 = &ocelot->block[VCAP_IS2];
filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, id,
false);
if (!filter)
return 0;
return ocelot_vcap_filter_del(ocelot, filter);
}
static int ocelot_mrp_redirect_add_vcap(struct ocelot *ocelot, int src_port,
int dst_port)
{
const u8 mrp_test_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct ocelot_vcap_filter *filter;
int err;
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return -ENOMEM;
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
filter->ingress_port_mask = BIT(src_port);
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_test_mask);
filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
filter->action.port_mask = BIT(dst_port);
err = ocelot_vcap_filter_add(ocelot, filter, NULL);
if (err)
kfree(filter);
return err;
}
static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
/* It is possible to use either the control or the test dmac here,
* because the mask doesn't cover the LSB
*/
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
}
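/* Example of what the key above matches (derived from the constants at the
 * top of this file): mrp_test_dmac is 01:15:4e:00:00:01 and
 * mrp_control_dmac is 01:15:4e:00:00:02; with a mask of ff:ff:ff:ff:ff:00
 * the same VCAP entry traps both MRP_Test and the other MRP control frames.
 */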
static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
{
unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
return ocelot_trap_add(ocelot, port, cookie, false,
ocelot_populate_mrp_trap_key);
}
static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
{
unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, cookie);
}
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_STANDALONE_PVID);
ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_STANDALONE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_port_private *priv;
struct net_device *dev;
if (!ocelot_port)
return -EOPNOTSUPP;
priv = container_of(ocelot_port, struct ocelot_port_private, port);
dev = priv->dev;
if (mrp->p_port != dev && mrp->s_port != dev)
return 0;
ocelot_port->mrp_ring_id = mrp->ring_id;
return 0;
}
EXPORT_SYMBOL(ocelot_mrp_add);
int ocelot_mrp_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port)
return -EOPNOTSUPP;
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
ocelot_port->mrp_ring_id = 0;
return 0;
}
EXPORT_SYMBOL(ocelot_mrp_del);
int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int dst_port;
int err;
if (!ocelot_port)
return -EOPNOTSUPP;
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup)
return -EOPNOTSUPP;
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
return -EINVAL;
err = ocelot_mrp_redirect_add_vcap(ocelot, port, dst_port);
if (err)
return err;
err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
ocelot_mrp_del_vcap(ocelot,
OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
return 0;
}
EXPORT_SYMBOL(ocelot_mrp_add_ring_role);
int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC && !mrp->sw_backup)
return -EOPNOTSUPP;
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
err = ocelot_mrp_trap_del(ocelot, port);
if (err)
return err;
ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
if (!ocelot_port)
continue;
if (ocelot_port->mrp_ring_id != 0)
goto out;
}
ocelot_mrp_del_mac(ocelot, ocelot->ports[port]);
out:
return 0;
}
EXPORT_SYMBOL(ocelot_mrp_del_ring_role);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_mrp.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
* Copyright (c) 2019 Microsemi Corporation
*/
#include <linux/iopoll.h>
#include <linux/proc_fs.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_police.h"
#include "ocelot_vcap.h"
#define ENTRY_WIDTH 32
enum vcap_sel {
VCAP_SEL_ENTRY = 0x1,
VCAP_SEL_ACTION = 0x2,
VCAP_SEL_COUNTER = 0x4,
VCAP_SEL_ALL = 0x7,
};
enum vcap_cmd {
VCAP_CMD_WRITE = 0, /* Copy from Cache to TCAM */
VCAP_CMD_READ = 1, /* Copy from TCAM to Cache */
VCAP_CMD_MOVE_UP = 2, /* Move <count> up */
VCAP_CMD_MOVE_DOWN = 3, /* Move <count> down */
VCAP_CMD_INITIALIZE = 4, /* Write all (from cache) */
};
#define VCAP_ENTRY_WIDTH 12 /* Max entry width (32bit words) */
#define VCAP_COUNTER_WIDTH 4 /* Max counter width (32bit words) */
struct vcap_data {
u32 entry[VCAP_ENTRY_WIDTH]; /* ENTRY_DAT */
u32 mask[VCAP_ENTRY_WIDTH]; /* MASK_DAT */
u32 action[VCAP_ENTRY_WIDTH]; /* ACTION_DAT */
u32 counter[VCAP_COUNTER_WIDTH]; /* CNT_DAT */
u32 tg; /* TG_DAT */
u32 type; /* Action type */
u32 tg_sw; /* Current type-group */
u32 cnt; /* Current counter */
u32 key_offset; /* Current entry offset */
u32 action_offset; /* Current action offset */
u32 counter_offset; /* Current counter offset */
u32 tg_value; /* Current type-group value */
u32 tg_mask; /* Current type-group mask */
};
static u32 vcap_read_update_ctrl(struct ocelot *ocelot,
const struct vcap_props *vcap)
{
return ocelot_target_read(ocelot, vcap->target, VCAP_CORE_UPDATE_CTRL);
}
static void vcap_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
u16 ix, int cmd, int sel)
{
u32 value = (VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT);
if ((sel & VCAP_SEL_ENTRY) && ix >= vcap->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
if (!(sel & VCAP_SEL_ACTION))
value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
if (!(sel & VCAP_SEL_COUNTER))
value |= VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
ocelot_target_write(ocelot, vcap->target, value, VCAP_CORE_UPDATE_CTRL);
read_poll_timeout(vcap_read_update_ctrl, value,
(value & VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
10, 100000, false, ocelot, vcap);
}
/* Convert from 0-based row to VCAP entry row and run command */
static void vcap_row_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
u32 row, int cmd, int sel)
{
vcap_cmd(ocelot, vcap, vcap->entry_count - row - 1, cmd, sel);
}
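/* Example (entry_count value assumed): with an IS2 TCAM of 1024 rows,
 * driver row 0 is written at hardware address 1023 and the last driver row
 * at address 0, i.e. the row numbering used by the driver is simply the
 * reverse of the hardware addressing.
 */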
static void vcap_entry2cache(struct ocelot *ocelot,
const struct vcap_props *vcap,
struct vcap_data *data)
{
u32 entry_words, i;
entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
ocelot_target_write_rix(ocelot, vcap->target, data->entry[i],
VCAP_CACHE_ENTRY_DAT, i);
ocelot_target_write_rix(ocelot, vcap->target, ~data->mask[i],
VCAP_CACHE_MASK_DAT, i);
}
ocelot_target_write(ocelot, vcap->target, data->tg, VCAP_CACHE_TG_DAT);
}
static void vcap_cache2entry(struct ocelot *ocelot,
const struct vcap_props *vcap,
struct vcap_data *data)
{
u32 entry_words, i;
entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
data->entry[i] = ocelot_target_read_rix(ocelot, vcap->target,
VCAP_CACHE_ENTRY_DAT, i);
/* Invert mask */
data->mask[i] = ~ocelot_target_read_rix(ocelot, vcap->target,
VCAP_CACHE_MASK_DAT, i);
}
data->tg = ocelot_target_read(ocelot, vcap->target, VCAP_CACHE_TG_DAT);
}
static void vcap_action2cache(struct ocelot *ocelot,
const struct vcap_props *vcap,
struct vcap_data *data)
{
u32 action_words, mask;
int i, width;
/* Encode action type */
width = vcap->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
ocelot_target_write_rix(ocelot, vcap->target, data->action[i],
VCAP_CACHE_ACTION_DAT, i);
for (i = 0; i < vcap->counter_words; i++)
ocelot_target_write_rix(ocelot, vcap->target, data->counter[i],
VCAP_CACHE_CNT_DAT, i);
}
static void vcap_cache2action(struct ocelot *ocelot,
const struct vcap_props *vcap,
struct vcap_data *data)
{
u32 action_words;
int i, width;
action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
data->action[i] = ocelot_target_read_rix(ocelot, vcap->target,
VCAP_CACHE_ACTION_DAT,
i);
for (i = 0; i < vcap->counter_words; i++)
data->counter[i] = ocelot_target_read_rix(ocelot, vcap->target,
VCAP_CACHE_CNT_DAT,
i);
/* Extract action type */
width = vcap->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
static void vcap_data_offset_get(const struct vcap_props *vcap,
struct vcap_data *data, int ix)
{
int num_subwords_per_entry, num_subwords_per_action;
int i, col, offset, num_entries_per_row, base;
u32 width = vcap->tg_width;
switch (data->tg_sw) {
case VCAP_TG_FULL:
num_entries_per_row = 1;
break;
case VCAP_TG_HALF:
num_entries_per_row = 2;
break;
case VCAP_TG_QUARTER:
num_entries_per_row = 4;
break;
default:
return;
}
col = (ix % num_entries_per_row);
num_subwords_per_entry = (vcap->sw_count / num_entries_per_row);
base = (vcap->sw_count - col * num_subwords_per_entry -
num_subwords_per_entry);
data->tg_value = 0;
data->tg_mask = 0;
for (i = 0; i < num_subwords_per_entry; i++) {
offset = ((base + i) * width);
data->tg_value |= (data->tg_sw << offset);
data->tg_mask |= GENMASK(offset + width - 1, offset);
}
/* Calculate key/action/counter offsets */
col = (num_entries_per_row - col - 1);
data->key_offset = (base * vcap->entry_width) / vcap->sw_count;
data->counter_offset = (num_subwords_per_entry * col *
vcap->counter_width);
i = data->type;
width = vcap->action_table[i].width;
num_subwords_per_action = vcap->action_table[i].count;
data->action_offset = ((num_subwords_per_action * col * width) /
num_entries_per_row);
data->action_offset += vcap->action_type_width;
}
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
{
u32 i, v, m;
for (i = 0; i < len; i++, offset++) {
v = data[offset / ENTRY_WIDTH];
m = (1 << (offset % ENTRY_WIDTH));
if (value & (1 << i))
v |= m;
else
v &= ~m;
data[offset / ENTRY_WIDTH] = v;
}
}
static u32 vcap_data_get(u32 *data, u32 offset, u32 len)
{
u32 i, v, m, value = 0;
for (i = 0; i < len; i++, offset++) {
v = data[offset / ENTRY_WIDTH];
m = (1 << (offset % ENTRY_WIDTH));
if (v & m)
value |= (1 << i);
}
return value;
}
static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
u32 value, u32 mask)
{
vcap_data_set(data->entry, offset + data->key_offset, width, value);
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
static void vcap_key_set(const struct vcap_props *vcap, struct vcap_data *data,
int field, u32 value, u32 mask)
{
u32 offset = vcap->keys[field].offset;
u32 length = vcap->keys[field].length;
vcap_key_field_set(data, offset, length, value, mask);
}
static void vcap_key_bytes_set(const struct vcap_props *vcap,
struct vcap_data *data, int field,
u8 *val, u8 *msk)
{
u32 offset = vcap->keys[field].offset;
u32 count = vcap->keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
WARN_ON(count % 8);
/* Data wider than 32 bits is split into chunks of at most 32 bits.
* The 32 LSB of the data are written to the 32 MSB of the TCAM.
*/
offset += count;
count /= 8;
for (i = 0; i < count; i++) {
j = (count - i - 1);
value += (val[j] << n);
mask += (msk[j] << n);
n += 8;
if (n == ENTRY_WIDTH || (i + 1) == count) {
offset -= n;
vcap_key_field_set(data, offset, n, value, mask);
n = 0;
value = 0;
mask = 0;
}
}
}
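/* Trace of the helper above for a 6-byte MAC address (48-bit key field):
 * array bytes 2..5 are packed into one 32-bit chunk written at the field's
 * offset + 16, and bytes 0..1 into a final 16-bit chunk written at the
 * field's base offset, which is what the comment about the 32 LSB of the
 * data going to the 32 MSB of the TCAM field describes.
 */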
static void vcap_key_l4_port_set(const struct vcap_props *vcap,
struct vcap_data *data, int field,
struct ocelot_vcap_udp_tcp *port)
{
u32 offset = vcap->keys[field].offset;
u32 length = vcap->keys[field].length;
WARN_ON(length != 16);
vcap_key_field_set(data, offset, length, port->value, port->mask);
}
static void vcap_key_bit_set(const struct vcap_props *vcap,
struct vcap_data *data, int field,
enum ocelot_vcap_bit val)
{
u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
u32 offset = vcap->keys[field].offset;
u32 length = vcap->keys[field].length;
WARN_ON(length != 1);
vcap_key_field_set(data, offset, length, value, msk);
}
static void vcap_action_set(const struct vcap_props *vcap,
struct vcap_data *data, int field, u32 value)
{
int offset = vcap->actions[field].offset;
int length = vcap->actions[field].length;
vcap_data_set(data->action, offset + data->action_offset, length,
value);
}
static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
struct ocelot_vcap_action *a = &filter->action;
vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
vcap_action_set(vcap, data, VCAP_IS2_ACT_MIRROR_ENA, a->mirror_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_COPY_ENA, a->cpu_copy_ena);
}
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_vcap_u64 payload;
struct vcap_data data;
int row = (ix / 2);
memset(&payload, 0, sizeof(payload));
memset(&data, 0, sizeof(data));
/* Read row */
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
vcap_cache2entry(ocelot, vcap, &data);
vcap_cache2action(ocelot, vcap, &data);
data.tg_sw = VCAP_TG_HALF;
vcap_data_offset_get(vcap, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (filter->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
vcap_key_set(vcap, &data, VCAP_IS2_HK_PAG, filter->pag, 0xff);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST,
(filter->lookup == 0) ? OCELOT_VCAP_BIT_1 :
OCELOT_VCAP_BIT_0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
vcap_key_set(vcap, &data, VCAP_IS2_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS2_HK_PCP,
tag->pcp.value[0], tag->pcp.mask[0]);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (filter->key_type) {
case OCELOT_VCAP_KEY_ETYPE: {
struct ocelot_vcap_key_etype *etype = &filter->key.etype;
type = IS2_TYPE_ETYPE;
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
etype->dmac.value, etype->dmac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
etype->smac.value, etype->smac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
etype->etype.value, etype->etype.mask);
/* Clear unused bits */
vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
0, 0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
0, 0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
0, 0);
vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
etype->data.value, etype->data.mask);
break;
}
case OCELOT_VCAP_KEY_LLC: {
struct ocelot_vcap_key_llc *llc = &filter->key.llc;
type = IS2_TYPE_LLC;
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
llc->dmac.value, llc->dmac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
payload.value, payload.mask);
break;
}
case OCELOT_VCAP_KEY_SNAP: {
struct ocelot_vcap_key_snap *snap = &filter->key.snap;
type = IS2_TYPE_SNAP;
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
snap->dmac.value, snap->dmac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
snap->smac.value, snap->smac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
filter->key.snap.snap.value,
filter->key.snap.snap.mask);
break;
}
case OCELOT_VCAP_KEY_ARP: {
struct ocelot_vcap_key_arp *arp = &filter->key.arp;
type = IS2_TYPE_ARP;
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
arp->smac.value, arp->smac.mask);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
arp->ethernet);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
arp->ip);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_LEN_OK,
arp->length);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
arp->dmac_match);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
arp->smac_match);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
arp->unknown);
/* OPCODE is inverse, bit 0 is reply flag, bit 1 is RARP flag */
val = ((arp->req == OCELOT_VCAP_BIT_0 ? 1 : 0) |
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
val, msk);
vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
arp->dip.value.addr, arp->dip.mask.addr);
vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
arp->sip.value.addr, arp->sip.mask.addr);
vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
0, 0);
break;
}
case OCELOT_VCAP_KEY_IPV4:
case OCELOT_VCAP_KEY_IPV6: {
enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp;
enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg;
enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh;
struct ocelot_vcap_key_ipv4 *ipv4 = NULL;
struct ocelot_vcap_key_ipv6 *ipv6 = NULL;
struct ocelot_vcap_udp_tcp *sport, *dport;
struct ocelot_vcap_ipv4 sip, dip;
struct ocelot_vcap_u8 proto, ds;
struct ocelot_vcap_u48 *ip_data;
if (filter->key_type == OCELOT_VCAP_KEY_IPV4) {
ipv4 = &filter->key.ipv4;
ttl = ipv4->ttl;
fragment = ipv4->fragment;
options = ipv4->options;
proto = ipv4->proto;
ds = ipv4->ds;
ip_data = &ipv4->data;
sip = ipv4->sip;
dip = ipv4->dip;
sport = &ipv4->sport;
dport = &ipv4->dport;
tcp_fin = ipv4->tcp_fin;
tcp_syn = ipv4->tcp_syn;
tcp_rst = ipv4->tcp_rst;
tcp_psh = ipv4->tcp_psh;
tcp_ack = ipv4->tcp_ack;
tcp_urg = ipv4->tcp_urg;
sip_eq_dip = ipv4->sip_eq_dip;
sport_eq_dport = ipv4->sport_eq_dport;
seq_zero = ipv4->seq_zero;
} else {
ipv6 = &filter->key.ipv6;
ttl = ipv6->ttl;
fragment = OCELOT_VCAP_BIT_ANY;
options = OCELOT_VCAP_BIT_ANY;
proto = ipv6->proto;
ds = ipv6->ds;
ip_data = &ipv6->data;
for (i = 0; i < 8; i++) {
val = ipv6->sip.value[i + 8];
msk = ipv6->sip.mask[i + 8];
if (i < 4) {
dip.value.addr[i] = val;
dip.mask.addr[i] = msk;
} else {
sip.value.addr[i - 4] = val;
sip.mask.addr[i - 4] = msk;
}
}
sport = &ipv6->sport;
dport = &ipv6->dport;
tcp_fin = ipv6->tcp_fin;
tcp_syn = ipv6->tcp_syn;
tcp_rst = ipv6->tcp_rst;
tcp_psh = ipv6->tcp_psh;
tcp_ack = ipv6->tcp_ack;
tcp_urg = ipv6->tcp_urg;
sip_eq_dip = ipv6->sip_eq_dip;
sport_eq_dport = ipv6->sport_eq_dport;
seq_zero = ipv6->seq_zero;
}
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_FRAGMENT,
fragment);
vcap_key_set(vcap, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_OPTIONS,
options);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
ttl);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_TOS,
ds.value, ds.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_DIP,
dip.value.addr, dip.mask.addr);
vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_SIP,
sip.value.addr, sip.mask.addr);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DIP_EQ_SIP,
sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
type = IS2_TYPE_IP_UDP_TCP;
if (msk == 0xff && (val == IPPROTO_TCP || val == IPPROTO_UDP)) {
/* UDP/TCP protocol match */
tcp = (val == IPPROTO_TCP ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_DPORT, dport);
vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT, sport);
vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SEQUENCE_EQ0,
seq_zero);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_FIN,
tcp_fin);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_SYN,
tcp_syn);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_RST,
tcp_rst);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_PSH,
tcp_psh);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_ACK,
tcp_ack);
vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_URG,
tcp_urg);
vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_DOM,
0, 0);
vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_VER,
0, 0);
} else {
if (msk == 0) {
/* Any IP protocol match */
type_mask = IS2_TYPE_MASK_IP_ANY;
} else {
/* Non-UDP/TCP protocol match */
type = IS2_TYPE_IP_OTHER;
for (i = 0; i < 6; i++) {
payload.value[i] = ip_data->value[i];
payload.mask[i] = ip_data->mask[i];
}
}
vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_IP4_L3_PROTO,
proto.value, proto.mask);
vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_L3_PAYLOAD,
payload.value, payload.mask);
}
break;
}
case OCELOT_VCAP_KEY_ANY:
default:
type = 0;
type_mask = 0;
count = vcap->entry_width / 2;
/* Iterate over the non-common part of the key and
* clear entry data
*/
for (i = vcap->keys[VCAP_IS2_HK_L2_DMAC].offset;
i < count; i += ENTRY_WIDTH) {
vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
vcap_key_set(vcap, &data, VCAP_IS2_TYPE, type, type_mask);
is2_action_set(ocelot, &data, filter);
vcap_data_set(data.counter, data.counter_offset,
vcap->counter_width, filter->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, vcap, &data);
vcap_action2cache(ocelot, vcap, &data);
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
static void is1_action_set(struct ocelot *ocelot, struct vcap_data *data,
const struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
const struct ocelot_vcap_action *a = &filter->action;
vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_REPLACE_ENA,
a->vid_replace_ena);
vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_ADD_VAL, a->vid);
vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
a->vlan_pop_cnt_ena);
vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT,
a->vlan_pop_cnt);
vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_DEI_ENA, a->pcp_dei_ena);
vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_VAL, a->pcp);
vcap_action_set(vcap, data, VCAP_IS1_ACT_DEI_VAL, a->dei);
vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_ENA, a->qos_ena);
vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_VAL, a->qos_val);
vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
a->pag_override_mask);
vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_VAL, a->pag_val);
}
static void is1_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
struct vcap_data data;
int row = ix / 2;
u32 type;
memset(&data, 0, sizeof(data));
/* Read row */
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
vcap_cache2entry(ocelot, vcap, &data);
vcap_cache2action(ocelot, vcap, &data);
data.tg_sw = VCAP_TG_HALF;
data.type = IS1_ACTION_TYPE_NORMAL;
vcap_data_offset_get(vcap, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (filter->prio != 0)
data.tg |= data.tg_value;
vcap_key_set(vcap, &data, VCAP_IS1_HK_LOOKUP, filter->lookup, 0x3);
vcap_key_set(vcap, &data, VCAP_IS1_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
tag->pcp.value[0], tag->pcp.mask[0]);
type = IS1_TYPE_S1_NORMAL;
switch (filter->key_type) {
case OCELOT_VCAP_KEY_ETYPE: {
struct ocelot_vcap_key_etype *etype = &filter->key.etype;
vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L2_SMAC,
etype->smac.value, etype->smac.mask);
vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
etype->etype.value, etype->etype.mask);
break;
}
case OCELOT_VCAP_KEY_IPV4: {
struct ocelot_vcap_key_ipv4 *ipv4 = &filter->key.ipv4;
struct ocelot_vcap_udp_tcp *sport = &ipv4->sport;
struct ocelot_vcap_udp_tcp *dport = &ipv4->dport;
enum ocelot_vcap_bit tcp_udp = OCELOT_VCAP_BIT_0;
struct ocelot_vcap_u8 proto = ipv4->proto;
struct ocelot_vcap_ipv4 sip = ipv4->sip;
u32 val, msk;
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP_SNAP,
OCELOT_VCAP_BIT_1);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP4,
OCELOT_VCAP_BIT_1);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_ETYPE_LEN,
OCELOT_VCAP_BIT_1);
vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L3_IP4_SIP,
sip.value.addr, sip.mask.addr);
val = proto.value[0];
msk = proto.mask[0];
if ((val == NEXTHDR_TCP || val == NEXTHDR_UDP) && msk == 0xff)
tcp_udp = OCELOT_VCAP_BIT_1;
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP_UDP, tcp_udp);
if (tcp_udp) {
enum ocelot_vcap_bit tcp = OCELOT_VCAP_BIT_0;
if (val == NEXTHDR_TCP)
tcp = OCELOT_VCAP_BIT_1;
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP, tcp);
vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_L4_SPORT,
sport);
/* Overloaded field */
vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_ETYPE,
dport);
} else {
/* IPv4 "other" frame */
struct ocelot_vcap_u16 etype = {0};
/* Overloaded field */
etype.value[0] = proto.value[0];
etype.mask[0] = proto.mask[0];
vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
etype.value, etype.mask);
}
break;
}
default:
break;
}
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TYPE,
type ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
is1_action_set(ocelot, &data, filter);
vcap_data_set(data.counter, data.counter_offset,
vcap->counter_width, filter->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, vcap, &data);
vcap_action2cache(ocelot, vcap, &data);
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
static void es0_action_set(struct ocelot *ocelot, struct vcap_data *data,
const struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
const struct ocelot_vcap_action *a = &filter->action;
vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_OUTER_TAG,
a->push_outer_tag);
vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_INNER_TAG,
a->push_inner_tag);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_TPID_SEL,
a->tag_a_tpid_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_VID_SEL,
a->tag_a_vid_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_PCP_SEL,
a->tag_a_pcp_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_A_VAL, a->vid_a_val);
vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_A_VAL, a->pcp_a_val);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_TPID_SEL,
a->tag_b_tpid_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_VID_SEL,
a->tag_b_vid_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_PCP_SEL,
a->tag_b_pcp_sel);
vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_B_VAL, a->vid_b_val);
vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_B_VAL, a->pcp_b_val);
}
static void es0_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
struct vcap_data data;
int row = ix;
memset(&data, 0, sizeof(data));
/* Read row */
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
vcap_cache2entry(ocelot, vcap, &data);
vcap_cache2action(ocelot, vcap, &data);
data.tg_sw = VCAP_TG_FULL;
data.type = ES0_ACTION_TYPE_NORMAL;
vcap_data_offset_get(vcap, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (filter->prio != 0)
data.tg |= data.tg_value;
vcap_key_set(vcap, &data, VCAP_ES0_IGR_PORT, filter->ingress_port.value,
filter->ingress_port.mask);
vcap_key_set(vcap, &data, VCAP_ES0_EGR_PORT, filter->egress_port.value,
filter->egress_port.mask);
vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_BC, filter->dmac_bc);
vcap_key_set(vcap, &data, VCAP_ES0_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_ES0_PCP,
tag->pcp.value[0], tag->pcp.mask[0]);
es0_action_set(ocelot, &data, filter);
vcap_data_set(data.counter, data.counter_offset,
vcap->counter_width, filter->stats.pkts);
/* Write row */
vcap_entry2cache(ocelot, vcap, &data);
vcap_action2cache(ocelot, vcap, &data);
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
}
static void vcap_entry_get(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
const struct vcap_props *vcap = &ocelot->vcap[filter->block_id];
struct vcap_data data;
int row, count;
u32 cnt;
if (filter->block_id == VCAP_ES0)
data.tg_sw = VCAP_TG_FULL;
else
data.tg_sw = VCAP_TG_HALF;
count = (1 << (data.tg_sw - 1));
row = (ix / count);
vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
vcap_cache2action(ocelot, vcap, &data);
vcap_data_offset_get(vcap, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
vcap->counter_width);
filter->stats.pkts = cnt;
}
static void vcap_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
if (filter->block_id == VCAP_IS1)
return is1_entry_set(ocelot, ix, filter);
if (filter->block_id == VCAP_IS2)
return is2_entry_set(ocelot, ix, filter);
if (filter->block_id == VCAP_ES0)
return es0_entry_set(ocelot, ix, filter);
}
struct vcap_policer_entry {
struct list_head list;
refcount_t refcount;
u32 pol_ix;
};
int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
struct ocelot_policer *pol)
{
struct qos_policer_conf pp = { 0 };
struct vcap_policer_entry *tmp;
int ret;
if (!pol)
return -EINVAL;
pp.mode = MSCC_QOS_RATE_MODE_DATA;
pp.pir = pol->rate;
pp.pbs = pol->burst;
list_for_each_entry(tmp, &ocelot->vcap_pol.pol_list, list)
if (tmp->pol_ix == pol_ix) {
refcount_inc(&tmp->refcount);
return 0;
}
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
ret = qos_policer_conf_set(ocelot, pol_ix, &pp);
if (ret) {
kfree(tmp);
return ret;
}
tmp->pol_ix = pol_ix;
refcount_set(&tmp->refcount, 1);
list_add_tail(&tmp->list, &ocelot->vcap_pol.pol_list);
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_policer_add);
int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix)
{
struct qos_policer_conf pp = {0};
struct vcap_policer_entry *tmp, *n;
u8 z = 0;
list_for_each_entry_safe(tmp, n, &ocelot->vcap_pol.pol_list, list)
if (tmp->pol_ix == pol_ix) {
z = refcount_dec_and_test(&tmp->refcount);
if (z) {
list_del(&tmp->list);
kfree(tmp);
}
}
if (z) {
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
return qos_policer_conf_set(ocelot, pol_ix, &pp);
}
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_policer_del);
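/* Usage sketch for the reference-counted policers above: two filters that
 * share the same pol_ix program the hardware policer only once. The first
 * ocelot_vcap_policer_add() call allocates the list entry and calls
 * qos_policer_conf_set(), a second call with the same index just bumps the
 * refcount, and the policer is disabled again only when
 * ocelot_vcap_policer_del() drops the last reference.
 */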
static int
ocelot_vcap_filter_add_aux_resources(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
struct ocelot_mirror *m;
int ret;
if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena) {
m = ocelot_mirror_get(ocelot, filter->egress_port.value,
extack);
if (IS_ERR(m))
return PTR_ERR(m);
}
if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
ret = ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
&filter->action.pol);
if (ret)
return ret;
}
return 0;
}
static void
ocelot_vcap_filter_del_aux_resources(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
if (filter->block_id == VCAP_IS2 && filter->action.police_ena)
ocelot_vcap_policer_del(ocelot, filter->action.pol_ix);
if (filter->block_id == VCAP_IS2 && filter->action.mirror_ena)
ocelot_mirror_put(ocelot);
}
static int ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
struct list_head *pos = &block->rules;
struct ocelot_vcap_filter *tmp;
int ret;
ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack);
if (ret)
return ret;
block->count++;
list_for_each_entry(tmp, &block->rules, list) {
if (filter->prio < tmp->prio) {
pos = &tmp->list;
break;
}
}
list_add_tail(&filter->list, pos);
return 0;
}
static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
const struct ocelot_vcap_filter *b)
{
return !memcmp(&a->id, &b->id, sizeof(struct ocelot_vcap_id));
}
static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
int index = 0;
list_for_each_entry(tmp, &block->rules, list) {
if (ocelot_vcap_filter_equal(filter, tmp))
return index;
index++;
}
return -ENOENT;
}
static struct ocelot_vcap_filter*
ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
int index)
{
struct ocelot_vcap_filter *tmp;
int i = 0;
list_for_each_entry(tmp, &block->rules, list) {
if (i == index)
return tmp;
++i;
}
return NULL;
}
struct ocelot_vcap_filter *
ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
unsigned long cookie, bool tc_offload)
{
struct ocelot_vcap_filter *filter;
list_for_each_entry(filter, &block->rules, list)
if (filter->id.tc_offload == tc_offload &&
filter->id.cookie == cookie)
return filter;
return NULL;
}
EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id);
/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
* on destination and source MAC addresses, but only on higher-level protocol
* information. The only frame types to match on keys containing MAC addresses
* in this case are non-SNAP, non-ARP, non-IP and non-OAM frames.
*
* If @on=true, then the above frame types (SNAP, ARP, IP and OAM) will match
* on MAC_ETYPE keys such as destination and source MAC on this ingress port.
 * However, the setting has the side effect that these frames no longer match
 * on any keys _other_ than MAC_ETYPE ones.
*/
static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
int lookup, bool on)
{
u32 val = 0;
if (on)
val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup));
ocelot_rmw_gix(ocelot, val,
ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)),
ANA_PORT_VCAP_S2_CFG, port);
}
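/* Illustration of the knob above: for lookup 1 with @on == true, the value
 * written is the OR of the five S2_*_DIS(BIT(1)) fields, so SNAP, ARP,
 * IP TCP/UDP, IP other and OAM frames are all steered to MAC_ETYPE keys in
 * that lookup; with @on == false the same mask is cleared again and the
 * default per-frame-type keys are used.
 */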
static bool
ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter)
{
u16 proto, mask;
if (filter->key_type != OCELOT_VCAP_KEY_ETYPE)
return false;
proto = ntohs(*(__be16 *)filter->key.etype.etype.value);
mask = ntohs(*(__be16 *)filter->key.etype.etype.mask);
/* ETH_P_ALL match, so all protocols below are included */
if (mask == 0)
return true;
if (proto == ETH_P_ARP)
return true;
if (proto == ETH_P_IP)
return true;
if (proto == ETH_P_IPV6)
return true;
return false;
}
static bool
ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter)
{
if (filter->key_type == OCELOT_VCAP_KEY_SNAP)
return true;
if (filter->key_type == OCELOT_VCAP_KEY_ARP)
return true;
if (filter->key_type == OCELOT_VCAP_KEY_IPV4)
return true;
if (filter->key_type == OCELOT_VCAP_KEY_IPV6)
return true;
return false;
}
static bool
ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter *tmp;
unsigned long port;
int i;
/* We only have the S2_IP_TCPUDP_DIS set of knobs for VCAP IS2 */
if (filter->block_id != VCAP_IS2)
return true;
if (ocelot_vcap_is_problematic_mac_etype(filter)) {
/* Search for any non-MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_non_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
ocelot_match_all_as_mac_etype(ocelot, port,
filter->lookup, true);
} else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) {
/* Search for any MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
ocelot_match_all_as_mac_etype(ocelot, port,
filter->lookup, false);
}
return true;
}
int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
int i, index, ret;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot mix MAC_ETYPE with non-MAC_ETYPE rules, use the other IS2 lookup");
return -EBUSY;
}
/* Add filter to the linked list */
ret = ocelot_vcap_filter_add_to_block(ocelot, block, filter, extack);
if (ret)
return ret;
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
if (index < 0)
return index;
/* Move down the rules to make room for the new filter */
for (i = block->count - 1; i > index; i--) {
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
/* Read back the filter's counters before moving it */
vcap_entry_get(ocelot, i - 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
/* Now insert the new filter */
vcap_entry_set(ocelot, index, filter);
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_filter_add);
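/* Worked example of the insertion above (illustrative indices): if the block
 * holds 4 rules after the list insertion and the new filter landed at index
 * 1, the loop runs with i == 3 and i == 2, copying TCAM entry 2 to 3 and
 * entry 1 to 2 (reading the counters back first so they are not lost), and
 * only then is the new filter written at TCAM index 1.
 */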
static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp, *n;
list_for_each_entry_safe(tmp, n, &block->rules, list) {
if (ocelot_vcap_filter_equal(filter, tmp)) {
ocelot_vcap_filter_del_aux_resources(ocelot, tmp);
list_del(&tmp->list);
kfree(tmp);
}
}
block->count--;
}
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter del_filter;
int i, index;
/* Need to inherit the block_id so that vcap_entry_set()
* does not get confused and knows where to install it.
*/
memset(&del_filter, 0, sizeof(del_filter));
del_filter.block_id = filter->block_id;
/* Get the index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
if (index < 0)
return index;
/* Delete filter */
ocelot_vcap_block_remove_filter(ocelot, block, filter);
/* Move up all the rules that followed the deleted filter */
for (i = index; i < block->count; i++) {
struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i);
/* Read back the filter's counters before moving it */
vcap_entry_get(ocelot, i + 1, tmp);
vcap_entry_set(ocelot, i, tmp);
}
/* Now delete the last filter, because it is duplicated */
vcap_entry_set(ocelot, block->count, &del_filter);
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_filter_del);
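/* Worked example of the deletion above (illustrative indices): if index == 1
 * and block->count == 3 after the list removal, the loop copies TCAM entry 2
 * to 1 and entry 3 to 2 (counters first), and the now-duplicated entry at
 * index 3 is overwritten with the empty del_filter so it stops matching.
 */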
int ocelot_vcap_filter_replace(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
int index;
index = ocelot_vcap_block_get_filter_index(block, filter);
if (index < 0)
return index;
vcap_entry_set(ocelot, index, filter);
return 0;
}
EXPORT_SYMBOL(ocelot_vcap_filter_replace);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter tmp;
int index;
index = ocelot_vcap_block_get_filter_index(block, filter);
if (index < 0)
return index;
vcap_entry_get(ocelot, index, filter);
/* After we get the result we need to clear the counters */
tmp = *filter;
tmp.stats.pkts = 0;
vcap_entry_set(ocelot, index, &tmp);
return 0;
}
static void ocelot_vcap_init_one(struct ocelot *ocelot,
const struct vcap_props *vcap)
{
struct vcap_data data;
memset(&data, 0, sizeof(data));
vcap_entry2cache(ocelot, vcap, &data);
ocelot_target_write(ocelot, vcap->target, vcap->entry_count,
VCAP_CORE_MV_CFG);
vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
vcap_action2cache(ocelot, vcap, &data);
ocelot_target_write(ocelot, vcap->target, vcap->action_count,
VCAP_CORE_MV_CFG);
vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
}
static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
struct vcap_props *vcap)
{
int counter_memory_width;
int num_default_actions;
int version;
version = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_VCAP_VER);
/* Only version 0 VCAP supported for now */
if (WARN_ON(version != 0))
return;
/* Width in bits of type-group field */
vcap->tg_width = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ENTRY_TG_WIDTH);
/* Number of subwords per TCAM row */
vcap->sw_count = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ENTRY_SWCNT);
/* Number of rows in the TCAM. There can be this many full keys, twice
 * this number of half keys, or four times this number of quarter keys.
*/
vcap->entry_count = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ENTRY_CNT);
/* Assuming there are 4 subwords per TCAM row, their layout in the
* actual TCAM (not in the cache) would be:
*
* | SW 3 | TG 3 | SW 2 | TG 2 | SW 1 | TG 1 | SW 0 | TG 0 |
*
* (where SW=subword and TG=Type-Group).
*
 * What VCAP_CONST_ENTRY_WIDTH is giving us is the width of one full TCAM
* row. But when software accesses the TCAM through the cache
* registers, the Type-Group values are written through another set of
* registers VCAP_TG_DAT, and therefore, it appears as though the 4
* subwords are contiguous in the cache memory.
* Important mention: regardless of the number of key entries per row
* (and therefore of key size: 1 full key or 2 half keys or 4 quarter
* keys), software always has to configure 4 Type-Group values. For
* example, in the case of 1 full key, the driver needs to set all 4
* Type-Group to be full key.
*
* For this reason, we need to fix up the value that the hardware is
* giving us. We don't actually care about the width of the entry in
* the TCAM. What we care about is the width of the entry in the cache
* registers, which is how we get to interact with it. And since the
* VCAP_ENTRY_DAT cache registers access only the subwords and not the
* Type-Groups, this means we need to subtract the width of the
* Type-Groups when packing and unpacking key entry data in a TCAM row.
*/
vcap->entry_width = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ENTRY_WIDTH);
vcap->entry_width -= vcap->tg_width * vcap->sw_count;
num_default_actions = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ACTION_DEF_CNT);
vcap->action_count = vcap->entry_count + num_default_actions;
vcap->action_width = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_ACTION_WIDTH);
/* The width of the counter memory, this is the complete width of all
* counter-fields associated with one full-word entry. There is one
* counter per entry sub-word (see CAP_CORE::ENTRY_SWCNT for number of
* subwords.)
*/
vcap->counter_words = vcap->sw_count;
counter_memory_width = ocelot_target_read(ocelot, vcap->target,
VCAP_CONST_CNT_WIDTH);
vcap->counter_width = counter_memory_width / vcap->counter_words;
}
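/* Numerical sketch of the entry width fix-up above (the constants come from
 * the hardware, so these values are hypothetical): with sw_count == 4,
 * tg_width == 2 and a raw VCAP_CONST_ENTRY_WIDTH of 384 bits, the width kept
 * for cache accesses becomes 384 - 2 * 4 == 376 bits, i.e. the Type-Group
 * bits are excluded from the VCAP_ENTRY_DAT view of a row.
 */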
int ocelot_vcap_init(struct ocelot *ocelot)
{
struct qos_policer_conf cpu_drop = {
.mode = MSCC_QOS_RATE_MODE_DATA,
};
int ret, i;
/* Create a policer that will drop the frames for the CPU. This policer
 * will be used as the action in the ACL rules that drop frames.
 */
ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop);
if (ret)
return ret;
for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
struct ocelot_vcap_block *block = &ocelot->block[i];
struct vcap_props *vcap = &ocelot->vcap[i];
INIT_LIST_HEAD(&block->rules);
ocelot_vcap_detect_constants(ocelot, vcap);
ocelot_vcap_init_one(ocelot, vcap);
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
}
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_vcap.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
* Copyright (c) 2019 Microsemi Corporation
*/
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_police.h"
#include "ocelot_vcap.h"
/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
* into the chain number. This is UAPI.
*/
#define VCAP_BLOCK 10000
#define VCAP_LOOKUP 1000
#define VCAP_IS1_NUM_LOOKUPS 3
#define VCAP_IS2_NUM_LOOKUPS 2
#define VCAP_IS2_NUM_PAG 256
#define VCAP_IS1_CHAIN(lookup) \
(1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
#define VCAP_IS2_CHAIN(lookup, pag) \
(2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
/* PSFP chain and block ID */
#define PSFP_BLOCK_ID OCELOT_NUM_VCAP_BLOCKS
#define OCELOT_PSFP_CHAIN (3 * VCAP_BLOCK)
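/* Examples of the chain encoding above:
 *   VCAP_IS1_CHAIN(0)    = 10000
 *   VCAP_IS1_CHAIN(2)    = 12000
 *   VCAP_IS2_CHAIN(0, 0) = 20000
 *   VCAP_IS2_CHAIN(1, 5) = 21005
 *   OCELOT_PSFP_CHAIN    = 30000
 */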
static int ocelot_chain_to_block(int chain, bool ingress)
{
int lookup, pag;
if (!ingress) {
if (chain == 0)
return VCAP_ES0;
return -EOPNOTSUPP;
}
/* Backwards compatibility with older, single-chain tc-flower
* offload support in Ocelot
*/
if (chain == 0)
return VCAP_IS2;
for (lookup = 0; lookup < VCAP_IS1_NUM_LOOKUPS; lookup++)
if (chain == VCAP_IS1_CHAIN(lookup))
return VCAP_IS1;
for (lookup = 0; lookup < VCAP_IS2_NUM_LOOKUPS; lookup++)
for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
if (chain == VCAP_IS2_CHAIN(lookup, pag))
return VCAP_IS2;
if (chain == OCELOT_PSFP_CHAIN)
return PSFP_BLOCK_ID;
return -EOPNOTSUPP;
}
/* Caller must ensure this is a valid IS1 or IS2 chain first,
* by calling ocelot_chain_to_block.
*/
static int ocelot_chain_to_lookup(int chain)
{
/* Backwards compatibility with older, single-chain tc-flower
* offload support in Ocelot
*/
if (chain == 0)
return 0;
return (chain / VCAP_LOOKUP) % 10;
}
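/* Example: chain 12000 (VCAP_IS1_CHAIN(2)) yields (12000 / 1000) % 10 == 2,
 * and chain 21005 (VCAP_IS2_CHAIN(1, 5)) yields (21005 / 1000) % 10 == 1.
 */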
/* Caller must ensure this is a valid IS2 chain first,
* by calling ocelot_chain_to_block.
*/
static int ocelot_chain_to_pag(int chain)
{
int lookup;
/* Backwards compatibility with older, single-chain tc-flower
* offload support in Ocelot
*/
if (chain == 0)
return 0;
lookup = ocelot_chain_to_lookup(chain);
/* calculate PAG value as chain index relative to the first PAG */
return chain - VCAP_IS2_CHAIN(lookup, 0);
}
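/* Example: for chain 21005 the lookup is 1 and VCAP_IS2_CHAIN(1, 0) is
 * 21000, so the PAG is 21005 - 21000 == 5.
 */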
static bool ocelot_is_goto_target_valid(int goto_target, int chain,
bool ingress)
{
int pag;
/* Can't offload GOTO in VCAP ES0 */
if (!ingress)
return (goto_target < 0);
/* Non-optional GOTOs */
if (chain == 0)
/* VCAP IS1 can be skipped, either partially or completely */
return (goto_target == VCAP_IS1_CHAIN(0) ||
goto_target == VCAP_IS1_CHAIN(1) ||
goto_target == VCAP_IS1_CHAIN(2) ||
goto_target == VCAP_IS2_CHAIN(0, 0) ||
goto_target == VCAP_IS2_CHAIN(1, 0) ||
goto_target == OCELOT_PSFP_CHAIN);
if (chain == VCAP_IS1_CHAIN(0))
return (goto_target == VCAP_IS1_CHAIN(1));
if (chain == VCAP_IS1_CHAIN(1))
return (goto_target == VCAP_IS1_CHAIN(2));
/* Lookup 2 of VCAP IS1 can really support non-optional GOTOs,
* using a Policy Association Group (PAG) value, which is an 8-bit
* value encoding a VCAP IS2 target chain.
*/
if (chain == VCAP_IS1_CHAIN(2)) {
for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
if (goto_target == VCAP_IS2_CHAIN(0, pag))
return true;
return false;
}
/* Non-optional GOTO from VCAP IS2 lookup 0 to lookup 1.
* We cannot change the PAG at this point.
*/
for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
if (chain == VCAP_IS2_CHAIN(0, pag))
return (goto_target == VCAP_IS2_CHAIN(1, pag));
/* VCAP IS2 lookup 1 can go to the PSFP block, if the hardware supports it */
for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
if (chain == VCAP_IS2_CHAIN(1, pag))
return (goto_target == OCELOT_PSFP_CHAIN);
return false;
}
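/* Summary of the GOTO graph accepted above, derived from the checks:
 *  - chain 0 may go to any IS1 lookup, to IS2 lookup 0 or 1 with PAG 0, or
 *    straight to the PSFP chain;
 *  - IS1 lookup 0 goes to IS1 lookup 1, which goes to IS1 lookup 2;
 *  - IS1 lookup 2 goes to IS2 lookup 0 with any PAG (the PAG itself is set
 *    by the FLOW_ACTION_GOTO handling further down);
 *  - IS2 lookup 0 goes to IS2 lookup 1 with the same PAG, and IS2 lookup 1
 *    goes to the PSFP chain;
 *  - VCAP ES0 (egress) rules must not contain a GOTO at all.
 */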
static struct ocelot_vcap_filter *
ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
int block_id;
block_id = ocelot_chain_to_block(chain, true);
if (block_id < 0)
return NULL;
if (block_id == VCAP_IS2) {
block = &ocelot->block[VCAP_IS1];
list_for_each_entry(filter, &block->rules, list)
if (filter->type == OCELOT_VCAP_FILTER_PAG &&
filter->goto_target == chain)
return filter;
}
list_for_each_entry(filter, &ocelot->dummy_rules, list)
if (filter->goto_target == chain)
return filter;
return NULL;
}
static int
ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port,
struct ocelot_vcap_filter *filter,
const struct flow_action_entry *a,
struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
if (!ocelot_port->vlan_aware) {
NL_SET_ERR_MSG_MOD(extack,
"Can only modify VLAN under VLAN aware bridge");
return -EOPNOTSUPP;
}
filter->action.vid_replace_ena = true;
filter->action.pcp_dei_ena = true;
filter->action.vid = a->vlan.vid;
filter->action.pcp = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
return 0;
}
static int
ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
const struct flow_action_entry *a,
struct netlink_ext_ack *extack)
{
enum ocelot_tag_tpid_sel tpid;
switch (ntohs(a->vlan.proto)) {
case ETH_P_8021Q:
tpid = OCELOT_TAG_TPID_SEL_8021Q;
break;
case ETH_P_8021AD:
tpid = OCELOT_TAG_TPID_SEL_8021AD;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Cannot modify custom TPID");
return -EOPNOTSUPP;
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
return 0;
}
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
const struct flow_action *action = &f->rule->action;
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
u32 pol_ix, pol_max;
u64 rate;
int err;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
return -EOPNOTSUPP;
chain = f->common.chain_index;
filter->block_id = ocelot_chain_to_block(chain, ingress);
if (filter->block_id < 0) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
return -EOPNOTSUPP;
}
if (filter->block_id == VCAP_IS1 || filter->block_id == VCAP_IS2)
filter->lookup = ocelot_chain_to_lookup(chain);
if (filter->block_id == VCAP_IS2)
filter->pag = ocelot_chain_to_pag(chain);
filter->goto_target = -1;
filter->type = OCELOT_VCAP_FILTER_DUMMY;
flow_action_for_each(i, a, action) {
switch (a->id) {
case FLOW_ACTION_DROP:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Drop action can only be offloaded to VCAP IS2");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
filter->action.port_mask = 0;
filter->action.police_ena = true;
filter->action.pol_ix = OCELOT_POLICER_DISCARD;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_ACCEPT:
if (filter->block_id != VCAP_ES0 &&
filter->block_id != VCAP_IS1 &&
filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Accept action can only be offloaded to VCAP chains");
return -EOPNOTSUPP;
}
if (filter->block_id != VCAP_ES0 &&
filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
"Trap action can only be offloaded to VCAP IS2 lookup 0");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
filter->action.port_mask = 0;
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
filter->is_trap = true;
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
break;
}
if (filter->block_id != VCAP_IS2 ||
filter->lookup != 0) {
NL_SET_ERR_MSG_MOD(extack,
"Police action can only be offloaded to VCAP IS2 lookup 0 or PSFP");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
err = ocelot_policer_validate(action, a, extack);
if (err)
return err;
filter->action.police_ena = true;
pol_ix = a->hw_index + ocelot->vcap_pol.base;
pol_max = ocelot->vcap_pol.max;
if (ocelot->vcap_pol.max2 && pol_ix > pol_max) {
pol_ix += ocelot->vcap_pol.base2 - pol_max - 1;
pol_max = ocelot->vcap_pol.max2;
}
if (pol_ix >= pol_max)
return -EINVAL;
filter->action.pol_ix = pol_ix;
rate = a->police.rate_bytes_ps;
filter->action.pol.rate = div_u64(rate, 1000) * 8;
filter->action.pol.burst = a->police.burst;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_REDIRECT:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Redirect action can only be offloaded to VCAP IS2");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
egress_port = ocelot->ops->netdev_to_port(a->dev);
if (egress_port < 0) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not an ocelot port");
return -EOPNOTSUPP;
}
filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
filter->action.port_mask = BIT(egress_port);
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_MIRRED:
if (filter->block_id != VCAP_IS2) {
NL_SET_ERR_MSG_MOD(extack,
"Mirror action can only be offloaded to VCAP IS2");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
egress_port = ocelot->ops->netdev_to_port(a->dev);
if (egress_port < 0) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not an ocelot port");
return -EOPNOTSUPP;
}
filter->egress_port.value = egress_port;
filter->action.mirror_ena = true;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_POP:
if (filter->block_id != VCAP_IS1) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN pop action can only be offloaded to VCAP IS1");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
filter->action.vlan_pop_cnt_ena = true;
filter->action.vlan_pop_cnt++;
if (filter->action.vlan_pop_cnt > 2) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot pop more than 2 VLAN headers");
return -EOPNOTSUPP;
}
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_MANGLE:
if (filter->block_id == VCAP_IS1) {
err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port,
filter, a,
extack);
} else if (filter->block_id == VCAP_ES0) {
err = ocelot_flower_parse_egress_vlan_modify(filter, a,
extack);
} else {
NL_SET_ERR_MSG_MOD(extack,
"VLAN modify action can only be offloaded to VCAP IS1 or ES0");
err = -EOPNOTSUPP;
}
if (err)
return err;
break;
case FLOW_ACTION_PRIORITY:
if (filter->block_id != VCAP_IS1) {
NL_SET_ERR_MSG_MOD(extack,
"Priority action can only be offloaded to VCAP IS1");
return -EOPNOTSUPP;
}
if (filter->goto_target != -1) {
NL_SET_ERR_MSG_MOD(extack,
"Last action must be GOTO");
return -EOPNOTSUPP;
}
filter->action.qos_ena = true;
filter->action.qos_val = a->priority;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_GOTO:
filter->goto_target = a->chain_index;
if (filter->block_id == VCAP_IS1 && filter->lookup == 2) {
int pag = ocelot_chain_to_pag(filter->goto_target);
filter->action.pag_override_mask = 0xff;
filter->action.pag_val = pag;
filter->type = OCELOT_VCAP_FILTER_PAG;
}
break;
case FLOW_ACTION_VLAN_PUSH:
if (filter->block_id != VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN push action can only be offloaded to VCAP ES0");
return -EOPNOTSUPP;
}
switch (ntohs(a->vlan.proto)) {
case ETH_P_8021Q:
tpid = OCELOT_TAG_TPID_SEL_8021Q;
break;
case ETH_P_8021AD:
tpid = OCELOT_TAG_TPID_SEL_8021AD;
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Cannot push custom TPID");
return -EOPNOTSUPP;
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
filter->action.tag_a_vid_sel = OCELOT_ES0_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_GATE:
if (filter->block_id != PSFP_BLOCK_ID) {
NL_SET_ERR_MSG_MOD(extack,
"Gate action can only be offloaded to PSFP chain");
return -EOPNOTSUPP;
}
filter->type = OCELOT_PSFP_FILTER_OFFLOAD;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
}
}
if (filter->goto_target == -1) {
if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
chain == 0 || filter->block_id == PSFP_BLOCK_ID) {
allow_missing_goto_target = true;
} else {
NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
return -EOPNOTSUPP;
}
}
if (!ocelot_is_goto_target_valid(filter->goto_target, chain, ingress) &&
!allow_missing_goto_target) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload this GOTO target");
return -EOPNOTSUPP;
}
return 0;
}
static int ocelot_flower_parse_indev(struct ocelot *ocelot, int port,
struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
int key_length = vcap->keys[VCAP_ES0_IGR_PORT].length;
struct netlink_ext_ack *extack = f->common.extack;
struct net_device *dev, *indev;
struct flow_match_meta match;
int ingress_port;
flow_rule_match_meta(rule, &match);
if (!match.mask->ingress_ifindex)
return 0;
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
}
dev = ocelot->ops->port_to_netdev(ocelot, port);
if (!dev)
return -EINVAL;
indev = __dev_get_by_index(dev_net(dev), match.key->ingress_ifindex);
if (!indev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
return -ENOENT;
}
ingress_port = ocelot->ops->netdev_to_port(indev);
if (ingress_port < 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload an ocelot ingress port");
return -EOPNOTSUPP;
}
if (ingress_port == port) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress port is equal to the egress port");
return -EINVAL;
}
filter->ingress_port.value = ingress_port;
filter->ingress_port.mask = GENMASK(key_length - 1, 0);
return 0;
}
static int
ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = f->common.extack;
u16 proto = ntohs(f->common.protocol);
bool match_protocol = true;
int ret;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_META) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
if (match.mask->l2_miss) {
NL_SET_ERR_MSG_MOD(extack, "Can't match on \"l2_miss\"");
return -EOPNOTSUPP;
}
}
/* For VCAP ES0 (egress rewriter) we can match on the ingress port */
if (!ingress) {
ret = ocelot_flower_parse_indev(ocelot, port, f, filter);
if (ret)
return ret;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
flow_rule_match_control(rule, &match);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
filter->key_type = OCELOT_VCAP_KEY_ANY;
filter->vlan.vid.value = match.key->vlan_id;
filter->vlan.vid.mask = match.mask->vlan_id;
filter->vlan.pcp.value[0] = match.key->vlan_priority;
filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on MAC address");
return -EOPNOTSUPP;
}
/* The hardware supports MAC matches only for the MAC_ETYPE key, so if
 * other matches (ports, TCP flags, etc) are also requested, just bail
 * out.
 */
if ((dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL))) !=
(BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)))
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
if (filter->block_id == VCAP_IS1 &&
!is_zero_ether_addr(match.mask->dst)) {
NL_SET_ERR_MSG_MOD(extack,
"Key type S1_NORMAL cannot match on destination MAC");
return -EOPNOTSUPP;
}
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
ether_addr_copy(filter->key.etype.dmac.value,
match.key->dst);
ether_addr_copy(filter->key.etype.smac.value,
match.key->src);
ether_addr_copy(filter->key.etype.dmac.mask,
match.mask->dst);
ether_addr_copy(filter->key.etype.smac.mask,
match.mask->src);
goto finished_key_parsing;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on IP protocol");
return -EOPNOTSUPP;
}
filter->key_type = OCELOT_VCAP_KEY_IPV4;
filter->key.ipv4.proto.value[0] =
match.key->ip_proto;
filter->key.ipv4.proto.mask[0] =
match.mask->ip_proto;
match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on IP protocol");
return -EOPNOTSUPP;
}
filter->key_type = OCELOT_VCAP_KEY_IPV6;
filter->key.ipv6.proto.value[0] =
match.key->ip_proto;
filter->key.ipv6.proto.mask[0] =
match.mask->ip_proto;
match_protocol = false;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) &&
proto == ETH_P_IP) {
struct flow_match_ipv4_addrs match;
u8 *tmp;
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on IP address");
return -EOPNOTSUPP;
}
flow_rule_match_ipv4_addrs(rule, &match);
if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) {
NL_SET_ERR_MSG_MOD(extack,
"Key type S1_NORMAL cannot match on destination IP");
return -EOPNOTSUPP;
}
tmp = &filter->key.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
tmp = &filter->key.ipv4.sip.mask.addr[0];
memcpy(tmp, &match.mask->src, 4);
tmp = &filter->key.ipv4.dip.value.addr[0];
memcpy(tmp, &match.key->dst, 4);
tmp = &filter->key.ipv4.dip.mask.addr[0];
memcpy(tmp, &match.mask->dst, 4);
match_protocol = false;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) &&
proto == ETH_P_IPV6) {
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on L4 ports");
return -EOPNOTSUPP;
}
flow_rule_match_ports(rule, &match);
filter->key.ipv4.sport.value = ntohs(match.key->src);
filter->key.ipv4.sport.mask = ntohs(match.mask->src);
filter->key.ipv4.dport.value = ntohs(match.key->dst);
filter->key.ipv4.dport.mask = ntohs(match.mask->dst);
match_protocol = false;
}
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 cannot match on L2 proto");
return -EOPNOTSUPP;
}
/* TODO: support SNAP, LLC etc */
if (proto < ETH_P_802_3_MIN)
return -EOPNOTSUPP;
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)filter->key.etype.etype.value = htons(proto);
*(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
}
/* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */
return 0;
}
static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
int ret;
filter->prio = f->common.prio;
filter->id.cookie = f->cookie;
filter->id.tc_offload = true;
ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
if (ret)
return ret;
/* The key of a PSFP filter is parsed by the stream identification
 * function, so there is nothing more to parse here.
 */
if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD)
return 0;
return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
static struct ocelot_vcap_filter
*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, bool ingress,
struct flow_cls_offload *f)
{
struct ocelot_vcap_filter *filter;
filter = kzalloc(sizeof(*filter), GFP_KERNEL);
if (!filter)
return NULL;
if (ingress) {
filter->ingress_port_mask = BIT(port);
} else {
const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
int key_length = vcap->keys[VCAP_ES0_EGR_PORT].length;
filter->egress_port.value = port;
filter->egress_port.mask = GENMASK(key_length - 1, 0);
}
return filter;
}
static int ocelot_vcap_dummy_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
list_add(&filter->list, &ocelot->dummy_rules);
return 0;
}
static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
list_del(&filter->list);
kfree(filter);
return 0;
}
/* If we have an egress VLAN modification rule, we need to actually write the
* delta between the input VLAN (from the key) and the output VLAN (from the
* action), but the action was parsed first. So we need to patch the delta into
* the action here.
*/
static int
ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
if (filter->block_id != VCAP_ES0 ||
filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID)
return 0;
if (filter->vlan.vid.mask != VLAN_VID_MASK) {
NL_SET_ERR_MSG_MOD(extack,
"VCAP ES0 VLAN rewriting needs a full VLAN in the key");
return -EOPNOTSUPP;
}
filter->action.vid_a_val -= filter->vlan.vid.value;
filter->action.vid_a_val &= VLAN_VID_MASK;
return 0;
}
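/* Worked example of the delta above: a rule matching VID 100 whose action
 * requests VID 200 ends up with vid_a_val == (200 - 100) & VLAN_VID_MASK ==
 * 100; with tag_a_vid_sel == OCELOT_ES0_VID_PLUS_CLASSIFIED_VID the hardware
 * adds the classified VID (100) back at egress, producing the requested 200.
 */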
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
int chain = f->common.chain_index;
int block_id, ret;
if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
return -EOPNOTSUPP;
}
block_id = ocelot_chain_to_block(chain, ingress);
if (block_id < 0) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
return -EOPNOTSUPP;
}
filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id],
f->cookie, true);
if (filter) {
/* Filter already exists on other ports */
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters");
return -EOPNOTSUPP;
}
filter->ingress_port_mask |= BIT(port);
return ocelot_vcap_filter_replace(ocelot, filter);
}
/* Filter didn't exist, create it now */
filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
kfree(filter);
return ret;
}
ret = ocelot_flower_patch_es0_vlan_modify(filter, extack);
if (ret) {
kfree(filter);
return ret;
}
/* The non-optional GOTOs for the TCAM skeleton don't need
* to be actually offloaded.
*/
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_add(ocelot, filter);
if (filter->type == OCELOT_PSFP_FILTER_OFFLOAD) {
kfree(filter);
if (ocelot->ops->psfp_filter_add)
return ocelot->ops->psfp_filter_add(ocelot, port, f);
NL_SET_ERR_MSG_MOD(extack, "PSFP chain is not supported in HW");
return -EOPNOTSUPP;
}
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
int block_id;
block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
if (block_id < 0)
return 0;
if (block_id == PSFP_BLOCK_ID) {
if (ocelot->ops->psfp_filter_del)
return ocelot->ops->psfp_filter_del(ocelot, f);
return -EOPNOTSUPP;
}
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter)
return 0;
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_del(ocelot, filter);
if (ingress) {
filter->ingress_port_mask &= ~BIT(port);
if (filter->ingress_port_mask)
return ocelot_vcap_filter_replace(ocelot, filter);
}
return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
struct ocelot_vcap_filter *filter;
struct ocelot_vcap_block *block;
struct flow_stats stats = {0};
int block_id, ret;
block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
if (block_id < 0)
return 0;
if (block_id == PSFP_BLOCK_ID) {
if (ocelot->ops->psfp_stats_get) {
ret = ocelot->ops->psfp_stats_get(ocelot, f, &stats);
if (ret)
return ret;
goto stats_update;
}
return -EOPNOTSUPP;
}
block = &ocelot->block[block_id];
filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
return 0;
ret = ocelot_vcap_filter_stats_update(ocelot, filter);
if (ret)
return ret;
stats.pkts = filter->stats.pkts;
stats_update:
flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_flower.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi SoCs FDMA driver
*
* Copyright (c) 2021 Microchip
*
 * Page recycling code is mostly taken from the gianfar driver.
*/
#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dsa/ocelot.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "ocelot_fdma.h"
#include "ocelot_qs.h"
DEFINE_STATIC_KEY_FALSE(ocelot_fdma_enabled);
static void ocelot_fdma_writel(struct ocelot *ocelot, u32 reg, u32 data)
{
regmap_write(ocelot->targets[FDMA], reg, data);
}
static u32 ocelot_fdma_readl(struct ocelot *ocelot, u32 reg)
{
u32 retval;
regmap_read(ocelot->targets[FDMA], reg, &retval);
return retval;
}
static dma_addr_t ocelot_fdma_idx_dma(dma_addr_t base, u16 idx)
{
return base + idx * sizeof(struct ocelot_fdma_dcb);
}
static u16 ocelot_fdma_dma_idx(dma_addr_t base, dma_addr_t dma)
{
return (dma - base) / sizeof(struct ocelot_fdma_dcb);
}
static u16 ocelot_fdma_idx_next(u16 idx, u16 ring_sz)
{
return unlikely(idx == ring_sz - 1) ? 0 : idx + 1;
}
static u16 ocelot_fdma_idx_prev(u16 idx, u16 ring_sz)
{
return unlikely(idx == 0) ? ring_sz - 1 : idx - 1;
}
static int ocelot_fdma_rx_ring_free(struct ocelot_fdma *fdma)
{
struct ocelot_fdma_rx_ring *rx_ring = &fdma->rx_ring;
if (rx_ring->next_to_use >= rx_ring->next_to_clean)
return OCELOT_FDMA_RX_RING_SIZE -
(rx_ring->next_to_use - rx_ring->next_to_clean) - 1;
else
return rx_ring->next_to_clean - rx_ring->next_to_use - 1;
}
static int ocelot_fdma_tx_ring_free(struct ocelot_fdma *fdma)
{
struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
if (tx_ring->next_to_use >= tx_ring->next_to_clean)
return OCELOT_FDMA_TX_RING_SIZE -
(tx_ring->next_to_use - tx_ring->next_to_clean) - 1;
else
return tx_ring->next_to_clean - tx_ring->next_to_use - 1;
}
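/* Numerical sketch of the free-space computation above (the ring size used
 * here is hypothetical): with a 128-entry ring, next_to_use == 10 and
 * next_to_clean == 4, there are 128 - (10 - 4) - 1 == 121 free slots; the
 * trailing "- 1" keeps one slot unused, the usual way to tell a full ring
 * from an empty one.
 */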
static bool ocelot_fdma_tx_ring_empty(struct ocelot_fdma *fdma)
{
struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
return tx_ring->next_to_clean == tx_ring->next_to_use;
}
static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,
int chan)
{
ocelot_fdma_writel(ocelot, MSCC_FDMA_DCB_LLP(chan), dma);
/* Barrier to force memory writes to DCB to be completed before starting
* the channel.
*/
wmb();
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));
}
static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot)
{
return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE);
}
static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)
{
u32 safe;
return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe,
safe & BIT(chan), 0,
OCELOT_FDMA_CH_SAFE_TIMEOUT_US);
}
static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb,
dma_addr_t dma_addr,
size_t size)
{
u32 offset = dma_addr & 0x3;
dcb->llp = 0;
dcb->datap = ALIGN_DOWN(dma_addr, 4);
dcb->datal = ALIGN_DOWN(size, 4);
dcb->stat = MSCC_FDMA_DCB_STAT_BLOCKO(offset);
}
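/* Example of the alignment handling above: for dma_addr == 0x1003 and
 * size == 2048, offset is 3, datap becomes 0x1000 and datal stays 2048
 * (already a multiple of 4); the offset of 3 is carried in the DCB status
 * word via MSCC_FDMA_DCB_STAT_BLOCKO() so the real start of the data is
 * not lost.
 */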
static bool ocelot_fdma_rx_alloc_page(struct ocelot *ocelot,
struct ocelot_fdma_rx_buf *rxb)
{
dma_addr_t mapping;
struct page *page;
page = dev_alloc_page();
if (unlikely(!page))
return false;
mapping = dma_map_page(ocelot->dev, page, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ocelot->dev, mapping))) {
__free_page(page);
return false;
}
rxb->page = page;
rxb->page_offset = 0;
rxb->dma_addr = mapping;
return true;
}
static int ocelot_fdma_alloc_rx_buffs(struct ocelot *ocelot, u16 alloc_cnt)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_rx_ring *rx_ring;
struct ocelot_fdma_rx_buf *rxb;
struct ocelot_fdma_dcb *dcb;
dma_addr_t dma_addr;
int ret = 0;
u16 idx;
rx_ring = &fdma->rx_ring;
idx = rx_ring->next_to_use;
while (alloc_cnt--) {
rxb = &rx_ring->bufs[idx];
/* Reuse the attached page if possible; allocate a new one otherwise */
if (unlikely(!rxb->page)) {
if (unlikely(!ocelot_fdma_rx_alloc_page(ocelot, rxb))) {
dev_err_ratelimited(ocelot->dev,
"Failed to allocate rx\n");
ret = -ENOMEM;
break;
}
}
dcb = &rx_ring->dcbs[idx];
dma_addr = rxb->dma_addr + rxb->page_offset;
ocelot_fdma_dcb_set_data(dcb, dma_addr, OCELOT_FDMA_RXB_SIZE);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
/* Chain the DCB to the next one */
dcb->llp = ocelot_fdma_idx_dma(rx_ring->dcbs_dma, idx);
}
rx_ring->next_to_use = idx;
rx_ring->next_to_alloc = idx;
return ret;
}
static bool ocelot_fdma_tx_dcb_set_skb(struct ocelot *ocelot,
struct ocelot_fdma_tx_buf *tx_buf,
struct ocelot_fdma_dcb *dcb,
struct sk_buff *skb)
{
dma_addr_t mapping;
mapping = dma_map_single(ocelot->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(ocelot->dev, mapping)))
return false;
dma_unmap_addr_set(tx_buf, dma_addr, mapping);
ocelot_fdma_dcb_set_data(dcb, mapping, OCELOT_FDMA_RX_SIZE);
tx_buf->skb = skb;
dcb->stat |= MSCC_FDMA_DCB_STAT_BLOCKL(skb->len);
dcb->stat |= MSCC_FDMA_DCB_STAT_SOF | MSCC_FDMA_DCB_STAT_EOF;
return true;
}
static bool ocelot_fdma_check_stop_rx(struct ocelot *ocelot)
{
u32 llp;
/* Check if the FDMA hits the DCB with LLP == NULL */
llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP(MSCC_FDMA_XTR_CHAN));
if (unlikely(llp))
return false;
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_DISABLE,
BIT(MSCC_FDMA_XTR_CHAN));
return true;
}
static void ocelot_fdma_rx_set_llp(struct ocelot_fdma_rx_ring *rx_ring)
{
struct ocelot_fdma_dcb *dcb;
unsigned int idx;
idx = ocelot_fdma_idx_prev(rx_ring->next_to_use,
OCELOT_FDMA_RX_RING_SIZE);
dcb = &rx_ring->dcbs[idx];
dcb->llp = 0;
}
static void ocelot_fdma_rx_restart(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_rx_ring *rx_ring;
const u8 chan = MSCC_FDMA_XTR_CHAN;
dma_addr_t new_llp, dma_base;
unsigned int idx;
u32 llp_prev;
int ret;
rx_ring = &fdma->rx_ring;
ret = ocelot_fdma_wait_chan_safe(ocelot, chan);
if (ret) {
dev_err_ratelimited(ocelot->dev,
"Unable to stop RX channel\n");
return;
}
ocelot_fdma_rx_set_llp(rx_ring);
/* The FDMA stopped on the last DCB, which contained a NULL LLP. Since
 * some RX DCBs have been processed, there is free space again, so set
 * DCB_LLP to point to the next DCB before restarting the channel.
 */
llp_prev = ocelot_fdma_readl(ocelot, MSCC_FDMA_DCB_LLP_PREV(chan));
dma_base = rx_ring->dcbs_dma;
/* Get the next DMA addr located after LLP == NULL DCB */
idx = ocelot_fdma_dma_idx(dma_base, llp_prev);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
new_llp = ocelot_fdma_idx_dma(dma_base, idx);
/* Finally reactivate the channel */
ocelot_fdma_activate_chan(ocelot, new_llp, chan);
}
static bool ocelot_fdma_add_rx_frag(struct ocelot_fdma_rx_buf *rxb, u32 stat,
struct sk_buff *skb, bool first)
{
int size = MSCC_FDMA_DCB_STAT_BLOCKL(stat);
struct page *page = rxb->page;
if (likely(first)) {
skb_put(skb, size);
} else {
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
rxb->page_offset, size, OCELOT_FDMA_RX_SIZE);
}
/* Try to reuse page */
if (unlikely(page_ref_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* Change offset to the other half */
rxb->page_offset ^= OCELOT_FDMA_RX_SIZE;
page_ref_inc(page);
return true;
}
static void ocelot_fdma_reuse_rx_page(struct ocelot *ocelot,
struct ocelot_fdma_rx_buf *old_rxb)
{
struct ocelot_fdma_rx_ring *rx_ring = &ocelot->fdma->rx_ring;
struct ocelot_fdma_rx_buf *new_rxb;
new_rxb = &rx_ring->bufs[rx_ring->next_to_alloc];
rx_ring->next_to_alloc = ocelot_fdma_idx_next(rx_ring->next_to_alloc,
OCELOT_FDMA_RX_RING_SIZE);
/* Copy page reference */
*new_rxb = *old_rxb;
/* Sync for use by the device */
dma_sync_single_range_for_device(ocelot->dev, old_rxb->dma_addr,
old_rxb->page_offset,
OCELOT_FDMA_RX_SIZE, DMA_FROM_DEVICE);
}
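/* Sketch of the page-recycling scheme used above: each RX buffer owns half a
 * page (OCELOT_FDMA_RX_SIZE). Once a fragment has been attached to an skb,
 * and if the page can be reused (no other reference, not pfmemalloc-backed),
 * page_offset is flipped to the other half, the refcount is bumped and the
 * page is re-posted at next_to_alloc; otherwise the page is unmapped and
 * dropped.
 */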
static struct sk_buff *ocelot_fdma_get_skb(struct ocelot *ocelot, u32 stat,
struct ocelot_fdma_rx_buf *rxb,
struct sk_buff *skb)
{
bool first = false;
/* Allocate skb head and data */
if (likely(!skb)) {
void *buff_addr = page_address(rxb->page) +
rxb->page_offset;
skb = build_skb(buff_addr, OCELOT_FDMA_SKBFRAG_SIZE);
if (unlikely(!skb)) {
dev_err_ratelimited(ocelot->dev,
"build_skb failed !\n");
return NULL;
}
first = true;
}
dma_sync_single_range_for_cpu(ocelot->dev, rxb->dma_addr,
rxb->page_offset, OCELOT_FDMA_RX_SIZE,
DMA_FROM_DEVICE);
if (ocelot_fdma_add_rx_frag(rxb, stat, skb, first)) {
/* Reuse the free half of the page for the next_to_alloc DCB */
ocelot_fdma_reuse_rx_page(ocelot, rxb);
} else {
/* page cannot be reused, unmap it */
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
}
/* clear rx buff content */
rxb->page = NULL;
return skb;
}
static bool ocelot_fdma_receive_skb(struct ocelot *ocelot, struct sk_buff *skb)
{
struct net_device *ndev;
void *xfh = skb->data;
u64 timestamp;
u64 src_port;
skb_pull(skb, OCELOT_TAG_LEN);
ocelot_xfh_get_src_port(xfh, &src_port);
if (unlikely(src_port >= ocelot->num_phys_ports))
return false;
ndev = ocelot_port_to_netdev(ocelot, src_port);
if (unlikely(!ndev))
return false;
if (pskb_trim(skb, skb->len - ETH_FCS_LEN))
return false;
skb->dev = ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb->dev->stats.rx_bytes += skb->len;
skb->dev->stats.rx_packets++;
if (ocelot->ptp) {
ocelot_xfh_get_rew_val(xfh, ×tamp);
ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
}
if (likely(!skb_defer_rx_timestamp(skb)))
netif_receive_skb(skb);
return true;
}
static int ocelot_fdma_rx_get(struct ocelot *ocelot, int budget)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_rx_ring *rx_ring;
struct ocelot_fdma_rx_buf *rxb;
struct ocelot_fdma_dcb *dcb;
struct sk_buff *skb;
int work_done = 0;
int cleaned_cnt;
u32 stat;
u16 idx;
cleaned_cnt = ocelot_fdma_rx_ring_free(fdma);
rx_ring = &fdma->rx_ring;
skb = rx_ring->skb;
while (budget--) {
idx = rx_ring->next_to_clean;
dcb = &rx_ring->dcbs[idx];
stat = dcb->stat;
if (MSCC_FDMA_DCB_STAT_BLOCKL(stat) == 0)
break;
/* The new packet is a start of frame, but an skb is already in
 * progress: we probably lost an EOF packet, so free the stale skb.
 */
if (unlikely(skb && (stat & MSCC_FDMA_DCB_STAT_SOF))) {
dev_kfree_skb(skb);
skb = NULL;
}
rxb = &rx_ring->bufs[idx];
/* Fetch next to clean buffer from the rx_ring */
skb = ocelot_fdma_get_skb(ocelot, stat, rxb, skb);
if (unlikely(!skb))
break;
work_done++;
cleaned_cnt++;
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
rx_ring->next_to_clean = idx;
if (unlikely(stat & MSCC_FDMA_DCB_STAT_ABORT ||
stat & MSCC_FDMA_DCB_STAT_PD)) {
dev_err_ratelimited(ocelot->dev,
"DCB aborted or pruned\n");
dev_kfree_skb(skb);
skb = NULL;
continue;
}
/* We still need to process the other fragment of the packet
* before delivering it to the network stack
*/
if (!(stat & MSCC_FDMA_DCB_STAT_EOF))
continue;
if (unlikely(!ocelot_fdma_receive_skb(ocelot, skb)))
dev_kfree_skb(skb);
skb = NULL;
}
rx_ring->skb = skb;
if (cleaned_cnt)
ocelot_fdma_alloc_rx_buffs(ocelot, cleaned_cnt);
return work_done;
}
static void ocelot_fdma_wakeup_netdev(struct ocelot *ocelot)
{
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct net_device *dev;
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
dev = priv->dev;
if (unlikely(netif_queue_stopped(dev)))
netif_wake_queue(dev);
}
}
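/* Reclaim completed TX DCBs, free the transmitted skbs, and restart the
 * injection channel if it stopped on a NULL LLP while DCBs are still pending.
 */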
static void ocelot_fdma_tx_cleanup(struct ocelot *ocelot, int budget)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_tx_ring *tx_ring;
struct ocelot_fdma_tx_buf *buf;
unsigned int new_null_llp_idx;
struct ocelot_fdma_dcb *dcb;
bool end_of_list = false;
struct sk_buff *skb;
dma_addr_t dma;
u32 dcb_llp;
u16 ntc;
int ret;
tx_ring = &fdma->tx_ring;
/* Purge the TX packets that have been sent, up to the NULL LLP or the
 * end of the done list.
 */
while (!ocelot_fdma_tx_ring_empty(fdma)) {
ntc = tx_ring->next_to_clean;
dcb = &tx_ring->dcbs[ntc];
if (!(dcb->stat & MSCC_FDMA_DCB_STAT_PD))
break;
buf = &tx_ring->bufs[ntc];
skb = buf->skb;
dma_unmap_single(ocelot->dev, dma_unmap_addr(buf, dma_addr),
skb->len, DMA_TO_DEVICE);
napi_consume_skb(skb, budget);
dcb_llp = dcb->llp;
/* Only update after accessing all dcb fields */
tx_ring->next_to_clean = ocelot_fdma_idx_next(ntc,
OCELOT_FDMA_TX_RING_SIZE);
/* If we hit the NULL LLP, stop, we might need to reload FDMA */
if (dcb_llp == 0) {
end_of_list = true;
break;
}
}
/* No need to try to wake the netdev queues if no TX buffers were cleaned up. */
if (ocelot_fdma_tx_ring_free(fdma))
ocelot_fdma_wakeup_netdev(ocelot);
/* If there is still some DCBs to be processed by the FDMA or if the
* pending list is empty, there is no need to restart the FDMA.
*/
if (!end_of_list || ocelot_fdma_tx_ring_empty(fdma))
return;
ret = ocelot_fdma_wait_chan_safe(ocelot, MSCC_FDMA_INJ_CHAN);
if (ret) {
dev_warn(ocelot->dev,
"Failed to wait for TX channel to stop\n");
return;
}
/* Set NULL LLP to be the last DCB used */
new_null_llp_idx = ocelot_fdma_idx_prev(tx_ring->next_to_use,
OCELOT_FDMA_TX_RING_SIZE);
dcb = &tx_ring->dcbs[new_null_llp_idx];
dcb->llp = 0;
dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, tx_ring->next_to_clean);
ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
}
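/* NAPI poll handler: clean up TX, process RX up to the budget, restart the
 * extraction channel if it stopped, and re-enable interrupts once done.
 */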
static int ocelot_fdma_napi_poll(struct napi_struct *napi, int budget)
{
struct ocelot_fdma *fdma = container_of(napi, struct ocelot_fdma, napi);
struct ocelot *ocelot = fdma->ocelot;
int work_done = 0;
bool rx_stopped;
ocelot_fdma_tx_cleanup(ocelot, budget);
rx_stopped = ocelot_fdma_check_stop_rx(ocelot);
work_done = ocelot_fdma_rx_get(ocelot, budget);
if (rx_stopped)
ocelot_fdma_rx_restart(ocelot);
if (work_done < budget) {
napi_complete_done(&fdma->napi, work_done);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
BIT(MSCC_FDMA_INJ_CHAN) |
BIT(MSCC_FDMA_XTR_CHAN));
}
return work_done;
}
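/* FDMA interrupt handler: acknowledge LLP and frame events, mask further
 * interrupts while scheduling NAPI, and report any channel errors.
 */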
static irqreturn_t ocelot_fdma_interrupt(int irq, void *dev_id)
{
u32 ident, llp, frm, err, err_code;
struct ocelot *ocelot = dev_id;
ident = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_IDENT);
frm = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_FRM);
llp = ocelot_fdma_readl(ocelot, MSCC_FDMA_INTR_LLP);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, llp & ident);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, frm & ident);
if (frm || llp) {
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
napi_schedule(&ocelot->fdma->napi);
}
err = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR);
if (unlikely(err)) {
err_code = ocelot_fdma_readl(ocelot, MSCC_FDMA_EVT_ERR_CODE);
dev_err_ratelimited(ocelot->dev,
"Error ! chans mask: %#x, code: %#x\n",
err, err_code);
ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR, err);
ocelot_fdma_writel(ocelot, MSCC_FDMA_EVT_ERR_CODE, err_code);
}
return IRQ_HANDLED;
}
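/* Map the skb into the next free TX DCB, then either activate the injection
 * channel (if the ring was empty) or chain the new DCB to the previous one.
 */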
static void ocelot_fdma_send_skb(struct ocelot *ocelot,
struct ocelot_fdma *fdma, struct sk_buff *skb)
{
struct ocelot_fdma_tx_ring *tx_ring = &fdma->tx_ring;
struct ocelot_fdma_tx_buf *tx_buf;
struct ocelot_fdma_dcb *dcb;
dma_addr_t dma;
u16 next_idx;
dcb = &tx_ring->dcbs[tx_ring->next_to_use];
tx_buf = &tx_ring->bufs[tx_ring->next_to_use];
if (!ocelot_fdma_tx_dcb_set_skb(ocelot, tx_buf, dcb, skb)) {
dev_kfree_skb_any(skb);
return;
}
next_idx = ocelot_fdma_idx_next(tx_ring->next_to_use,
OCELOT_FDMA_TX_RING_SIZE);
skb_tx_timestamp(skb);
/* If the FDMA TX chan is empty, then enqueue the DCB directly */
if (ocelot_fdma_tx_ring_empty(fdma)) {
dma = ocelot_fdma_idx_dma(tx_ring->dcbs_dma,
tx_ring->next_to_use);
ocelot_fdma_activate_chan(ocelot, dma, MSCC_FDMA_INJ_CHAN);
} else {
/* Chain the DCBs */
dcb->llp = ocelot_fdma_idx_dma(tx_ring->dcbs_dma, next_idx);
}
tx_ring->next_to_use = next_idx;
}
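/* Make room for the injection frame header and FCS, linearize the skb and
 * fill in the IFH. Returns nonzero and frees the skb on failure.
 */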
static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = max_t(int, OCELOT_TAG_LEN - skb_headroom(skb), 0);
int needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
void *ifh;
int err;
if (unlikely(needed_headroom || needed_tailroom ||
skb_header_cloned(skb))) {
err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
if (unlikely(err)) {
dev_kfree_skb_any(skb);
return 1;
}
}
err = skb_linearize(skb);
if (err) {
net_err_ratelimited("%s: skb_linearize error (%d)!\n",
dev->name, err);
dev_kfree_skb_any(skb);
return 1;
}
ifh = skb_push(skb, OCELOT_TAG_LEN);
skb_put(skb, ETH_FCS_LEN);
memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
return 0;
}
int ocelot_fdma_inject_frame(struct ocelot *ocelot, int port, u32 rew_op,
struct sk_buff *skb, struct net_device *dev)
{
struct ocelot_fdma *fdma = ocelot->fdma;
int ret = NETDEV_TX_OK;
spin_lock(&fdma->tx_ring.xmit_lock);
if (ocelot_fdma_tx_ring_free(fdma) == 0) {
netif_stop_queue(dev);
ret = NETDEV_TX_BUSY;
goto out;
}
if (ocelot_fdma_prepare_skb(ocelot, port, rew_op, skb, dev))
goto out;
ocelot_fdma_send_skb(ocelot, fdma, skb);
out:
spin_unlock(&fdma->tx_ring.xmit_lock);
return ret;
}
static void ocelot_fdma_free_rx_ring(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_rx_ring *rx_ring;
struct ocelot_fdma_rx_buf *rxb;
u16 idx;
rx_ring = &fdma->rx_ring;
idx = rx_ring->next_to_clean;
/* Free the pages held in the RX ring */
while (idx != rx_ring->next_to_use) {
rxb = &rx_ring->bufs[idx];
dma_unmap_page(ocelot->dev, rxb->dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(rxb->page);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_RX_RING_SIZE);
}
if (fdma->rx_ring.skb)
dev_kfree_skb_any(fdma->rx_ring.skb);
}
static void ocelot_fdma_free_tx_ring(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_tx_ring *tx_ring;
struct ocelot_fdma_tx_buf *txb;
struct sk_buff *skb;
u16 idx;
tx_ring = &fdma->tx_ring;
idx = tx_ring->next_to_clean;
while (idx != tx_ring->next_to_use) {
txb = &tx_ring->bufs[idx];
skb = txb->skb;
dma_unmap_single(ocelot->dev, dma_unmap_addr(txb, dma_addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
idx = ocelot_fdma_idx_next(idx, OCELOT_FDMA_TX_RING_SIZE);
}
}
static int ocelot_fdma_rings_alloc(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
struct ocelot_fdma_dcb *dcbs;
unsigned int adjust;
dma_addr_t dcbs_dma;
int ret;
/* Create a pool of consistent memory blocks for hardware descriptors */
fdma->dcbs_base = dmam_alloc_coherent(ocelot->dev,
OCELOT_DCBS_HW_ALLOC_SIZE,
&fdma->dcbs_dma_base, GFP_KERNEL);
if (!fdma->dcbs_base)
return -ENOMEM;
/* DCBs must be aligned on a 32bit boundary */
dcbs = fdma->dcbs_base;
dcbs_dma = fdma->dcbs_dma_base;
if (!IS_ALIGNED(dcbs_dma, 4)) {
adjust = dcbs_dma & 0x3;
dcbs_dma = ALIGN(dcbs_dma, 4);
dcbs = (void *)dcbs + adjust;
}
/* TX queue */
fdma->tx_ring.dcbs = dcbs;
fdma->tx_ring.dcbs_dma = dcbs_dma;
spin_lock_init(&fdma->tx_ring.xmit_lock);
/* RX queue */
fdma->rx_ring.dcbs = dcbs + OCELOT_FDMA_TX_RING_SIZE;
fdma->rx_ring.dcbs_dma = dcbs_dma + OCELOT_FDMA_TX_DCB_SIZE;
ret = ocelot_fdma_alloc_rx_buffs(ocelot,
ocelot_fdma_tx_ring_free(fdma));
if (ret) {
ocelot_fdma_free_rx_ring(ocelot);
return ret;
}
/* Set the last DCB LLP to NULL. This is normally done when restarting
 * the RX chan, but it is needed here for the first run.
 */
ocelot_fdma_rx_set_llp(&fdma->rx_ring);
return 0;
}
void ocelot_fdma_netdev_init(struct ocelot *ocelot, struct net_device *dev)
{
struct ocelot_fdma *fdma = ocelot->fdma;
dev->needed_headroom = OCELOT_TAG_LEN;
dev->needed_tailroom = ETH_FCS_LEN;
if (fdma->ndev)
return;
fdma->ndev = dev;
netif_napi_add_weight(dev, &fdma->napi, ocelot_fdma_napi_poll,
OCELOT_FDMA_WEIGHT);
}
void ocelot_fdma_netdev_deinit(struct ocelot *ocelot, struct net_device *dev)
{
struct ocelot_fdma *fdma = ocelot->fdma;
if (fdma->ndev == dev) {
netif_napi_del(&fdma->napi);
fdma->ndev = NULL;
}
}
void ocelot_fdma_init(struct platform_device *pdev, struct ocelot *ocelot)
{
struct device *dev = ocelot->dev;
struct ocelot_fdma *fdma;
int ret;
fdma = devm_kzalloc(dev, sizeof(*fdma), GFP_KERNEL);
if (!fdma)
return;
ocelot->fdma = fdma;
ocelot->dev->coherent_dma_mask = DMA_BIT_MASK(32);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
fdma->ocelot = ocelot;
fdma->irq = platform_get_irq_byname(pdev, "fdma");
ret = devm_request_irq(dev, fdma->irq, ocelot_fdma_interrupt, 0,
dev_name(dev), ocelot);
if (ret)
goto err_free_fdma;
ret = ocelot_fdma_rings_alloc(ocelot);
if (ret)
goto err_free_irq;
static_branch_enable(&ocelot_fdma_enabled);
return;
err_free_irq:
devm_free_irq(dev, fdma->irq, fdma);
err_free_fdma:
devm_kfree(dev, fdma);
ocelot->fdma = NULL;
}
void ocelot_fdma_start(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
/* Reconfigure for extraction and injection using DMA */
ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_MODE(2), QS_INJ_GRP_CFG, 0);
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(0), QS_INJ_CTRL, 0);
ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_MODE(2), QS_XTR_GRP_CFG, 0);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP, 0xffffffff);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM, 0xffffffff);
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_LLP_ENA,
BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_FRM_ENA,
BIT(MSCC_FDMA_XTR_CHAN));
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA,
BIT(MSCC_FDMA_INJ_CHAN) | BIT(MSCC_FDMA_XTR_CHAN));
napi_enable(&fdma->napi);
ocelot_fdma_activate_chan(ocelot, ocelot->fdma->rx_ring.dcbs_dma,
MSCC_FDMA_XTR_CHAN);
}
void ocelot_fdma_deinit(struct ocelot *ocelot)
{
struct ocelot_fdma *fdma = ocelot->fdma;
ocelot_fdma_writel(ocelot, MSCC_FDMA_INTR_ENA, 0);
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
BIT(MSCC_FDMA_XTR_CHAN));
ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_FORCEDIS,
BIT(MSCC_FDMA_INJ_CHAN));
napi_synchronize(&fdma->napi);
napi_disable(&fdma->napi);
ocelot_fdma_free_rx_ring(ocelot);
ocelot_fdma_free_tx_ring(ocelot);
}
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_fdma.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2019 Microsemi Corporation
*/
#include <soc/mscc/ocelot.h>
#include "ocelot_police.h"
/* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */
#define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */
#define POL_MODE_DATARATE 1 /* Excl IPG. Unit: 33 1/3 kbps, 4096 bytes */
#define POL_MODE_FRMRATE_HI 2 /* Unit: 33 1/3 fps, 32.8 frames */
#define POL_MODE_FRMRATE_LO 3 /* Unit: 1/3 fps, 0.3 frames */
/* Policer indexes */
#define POL_IX_PORT 0 /* 0-11 : Port policers */
#define POL_IX_QUEUE 32 /* 32-127 : Queue policers */
/* Default policer order */
#define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
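/* Translate a generic policer configuration into ANA_POL_* register values,
 * converting rates and bursts to the hardware units listed above and
 * rejecting values that exceed the hardware limits.
 */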
int qos_policer_conf_set(struct ocelot *ocelot, u32 pol_ix,
struct qos_policer_conf *conf)
{
u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
bool cir_discard = 0, pir_discard = 0;
u32 pbs_max = 0, cbs_max = 0;
u8 ipg = 20;
u32 value;
pir = conf->pir;
pbs = conf->pbs;
switch (conf->mode) {
case MSCC_QOS_RATE_MODE_LINE:
case MSCC_QOS_RATE_MODE_DATA:
if (conf->mode == MSCC_QOS_RATE_MODE_LINE) {
frm_mode = POL_MODE_LINERATE;
ipg = min_t(u8, GENMASK(4, 0), conf->ipg);
} else {
frm_mode = POL_MODE_DATARATE;
}
if (conf->dlb) {
cir_ena = 1;
cir = conf->cir;
cbs = conf->cbs;
if (cir == 0 && cbs == 0) {
/* Discard cir frames */
cir_discard = 1;
} else {
cir = DIV_ROUND_UP(cir, 100);
cir *= 3; /* 33 1/3 kbps */
cbs = DIV_ROUND_UP(cbs, 4096);
cbs = (cbs ? cbs : 1); /* No zero burst size */
cbs_max = 60; /* Limit burst size */
cf = conf->cf;
if (cf)
pir += conf->cir;
}
}
if (pir == 0 && pbs == 0) {
/* Discard PIR frames */
pir_discard = 1;
} else {
pir = DIV_ROUND_UP(pir, 100);
pir *= 3; /* 33 1/3 kbps */
pbs = DIV_ROUND_UP(pbs, 4096);
pbs = (pbs ? pbs : 1); /* No zero burst size */
pbs_max = 60; /* Limit burst size */
}
break;
case MSCC_QOS_RATE_MODE_FRAME:
if (pir >= 100) {
frm_mode = POL_MODE_FRMRATE_HI;
pir = DIV_ROUND_UP(pir, 100);
pir *= 3; /* 33 1/3 fps */
pbs = (pbs * 10) / 328; /* 32.8 frames */
pbs = (pbs ? pbs : 1); /* No zero burst size */
pbs_max = GENMASK(6, 0); /* Limit burst size */
} else {
frm_mode = POL_MODE_FRMRATE_LO;
if (pir == 0 && pbs == 0) {
/* Discard all frames */
pir_discard = 1;
cir_discard = 1;
} else {
pir *= 3; /* 1/3 fps */
pbs = (pbs * 10) / 3; /* 0.3 frames */
pbs = (pbs ? pbs : 1); /* No zero burst size */
pbs_max = 61; /* Limit burst size */
}
}
break;
default: /* MSCC_QOS_RATE_MODE_DISABLED */
/* Disable policer using maximum rate and zero burst */
pir = GENMASK(15, 0);
pbs = 0;
break;
}
/* Check limits */
if (pir > GENMASK(15, 0)) {
dev_err(ocelot->dev,
"Invalid pir for policer %u: %u (max %lu)\n",
pol_ix, pir, GENMASK(15, 0));
return -EINVAL;
}
if (cir > GENMASK(15, 0)) {
dev_err(ocelot->dev,
"Invalid cir for policer %u: %u (max %lu)\n",
pol_ix, cir, GENMASK(15, 0));
return -EINVAL;
}
if (pbs > pbs_max) {
dev_err(ocelot->dev,
"Invalid pbs for policer %u: %u (max %u)\n",
pol_ix, pbs, pbs_max);
return -EINVAL;
}
if (cbs > cbs_max) {
dev_err(ocelot->dev,
"Invalid cbs for policer %u: %u (max %u)\n",
pol_ix, cbs, cbs_max);
return -EINVAL;
}
value = (ANA_POL_MODE_CFG_IPG_SIZE(ipg) |
ANA_POL_MODE_CFG_FRM_MODE(frm_mode) |
(cf ? ANA_POL_MODE_CFG_DLB_COUPLED : 0) |
(cir_ena ? ANA_POL_MODE_CFG_CIR_ENA : 0) |
ANA_POL_MODE_CFG_OVERSHOOT_ENA);
ocelot_write_gix(ocelot, value, ANA_POL_MODE_CFG, pol_ix);
ocelot_write_gix(ocelot,
ANA_POL_PIR_CFG_PIR_RATE(pir) |
ANA_POL_PIR_CFG_PIR_BURST(pbs),
ANA_POL_PIR_CFG, pol_ix);
ocelot_write_gix(ocelot,
(pir_discard ? GENMASK(22, 0) : 0),
ANA_POL_PIR_STATE, pol_ix);
ocelot_write_gix(ocelot,
ANA_POL_CIR_CFG_CIR_RATE(cir) |
ANA_POL_CIR_CFG_CIR_BURST(cbs),
ANA_POL_CIR_CFG, pol_ix);
ocelot_write_gix(ocelot,
(cir_discard ? GENMASK(22, 0) : 0),
ANA_POL_CIR_STATE, pol_ix);
return 0;
}
int ocelot_policer_validate(const struct flow_action *action,
const struct flow_action_entry *a,
struct netlink_ext_ack *extack)
{
if (a->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when exceed action is not drop");
return -EOPNOTSUPP;
}
if (a->police.notexceed.act_id != FLOW_ACTION_PIPE &&
a->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is not pipe or ok");
return -EOPNOTSUPP;
}
if (a->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, a)) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is ok, but police action is not last");
return -EOPNOTSUPP;
}
if (a->police.peakrate_bytes_ps ||
a->police.avrate || a->police.overhead) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when peakrate/avrate/overhead is configured");
return -EOPNOTSUPP;
}
if (a->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack,
"Offload does not support packets per second");
return -EOPNOTSUPP;
}
return 0;
}
EXPORT_SYMBOL(ocelot_policer_validate);
int ocelot_port_policer_add(struct ocelot *ocelot, int port,
struct ocelot_policer *pol)
{
struct qos_policer_conf pp = { 0 };
int err;
if (!pol)
return -EINVAL;
pp.mode = MSCC_QOS_RATE_MODE_DATA;
pp.pir = pol->rate;
pp.pbs = pol->burst;
dev_dbg(ocelot->dev, "%s: port %u pir %u kbps, pbs %u bytes\n",
__func__, port, pp.pir, pp.pbs);
err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
ocelot_rmw_gix(ocelot,
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
ANA_PORT_POL_CFG, port);
return 0;
}
EXPORT_SYMBOL(ocelot_port_policer_add);
int ocelot_port_policer_del(struct ocelot *ocelot, int port)
{
struct qos_policer_conf pp = { 0 };
int err;
dev_dbg(ocelot->dev, "%s: port %u\n", __func__, port);
pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
err = qos_policer_conf_set(ocelot, POL_IX_PORT + port, &pp);
if (err)
return err;
ocelot_rmw_gix(ocelot,
ANA_PORT_POL_CFG_POL_ORDER(POL_ORDER),
ANA_PORT_POL_CFG_PORT_POL_ENA |
ANA_PORT_POL_CFG_POL_ORDER_M,
ANA_PORT_POL_CFG, port);
return 0;
}
EXPORT_SYMBOL(ocelot_port_policer_del);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_police.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <net/pkt_sched.h>
#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
#define TABLE_UPDATE_SLEEP_US 10
#define TABLE_UPDATE_TIMEOUT_US 100000
#define MEM_INIT_SLEEP_US 1000
#define MEM_INIT_TIMEOUT_US 100000
#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
u8 mac[ETH_ALEN];
u16 vid;
enum macaccess_entry_type type;
};
/* Caller must hold &ocelot->mact_lock */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
}
/* Caller must hold &ocelot->mact_lock */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{
u32 val;
return readx_poll_timeout(ocelot_mact_read_macaccess,
ocelot, val,
(val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
MACACCESS_CMD_IDLE,
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
/* Caller must hold &ocelot->mact_lock */
static void ocelot_mact_select(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN],
unsigned int vid)
{
u32 macl = 0, mach = 0;
/* Set the MAC address to handle and the vlan associated in a format
* understood by the hardware.
*/
mach |= vid << 16;
mach |= mac[0] << 8;
mach |= mac[1] << 0;
macl |= mac[2] << 24;
macl |= mac[3] << 16;
macl |= mac[4] << 8;
macl |= mac[5] << 0;
ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA);
ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA);
}
static int __ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
u32 cmd = ANA_TABLES_MACACCESS_VALID |
ANA_TABLES_MACACCESS_DEST_IDX(port) |
ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
unsigned int mc_ports;
int err;
/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
if (type == ENTRYTYPE_MACv4)
mc_ports = (mac[1] << 8) | mac[2];
else if (type == ENTRYTYPE_MACv6)
mc_ports = (mac[0] << 8) | mac[1];
else
mc_ports = 0;
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
err = ocelot_mact_wait_for_completion(ocelot);
return err;
}
int ocelot_mact_learn(struct ocelot *ocelot, int port,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type type)
{
int ret;
mutex_lock(&ocelot->mact_lock);
ret = __ocelot_mact_learn(ocelot, port, mac, vid, type);
mutex_unlock(&ocelot->mact_lock);
return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn);
int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid)
{
int err;
mutex_lock(&ocelot->mact_lock);
ocelot_mact_select(ocelot, mac, vid);
/* Issue a forget command */
ocelot_write(ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
ANA_TABLES_MACACCESS);
err = ocelot_mact_wait_for_completion(ocelot);
mutex_unlock(&ocelot->mact_lock);
return err;
}
EXPORT_SYMBOL(ocelot_mact_forget);
int ocelot_mact_lookup(struct ocelot *ocelot, int *dst_idx,
const unsigned char mac[ETH_ALEN],
unsigned int vid, enum macaccess_entry_type *type)
{
int val;
mutex_lock(&ocelot->mact_lock);
ocelot_mact_select(ocelot, mac, vid);
/* Issue a read command with MACACCESS_VALID=1. */
ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
ANA_TABLES_MACACCESS);
if (ocelot_mact_wait_for_completion(ocelot)) {
mutex_unlock(&ocelot->mact_lock);
return -ETIMEDOUT;
}
/* Read back the entry flags */
val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
mutex_unlock(&ocelot->mact_lock);
if (!(val & ANA_TABLES_MACACCESS_VALID))
return -ENOENT;
*dst_idx = ANA_TABLES_MACACCESS_DEST_IDX_X(val);
*type = ANA_TABLES_MACACCESS_ENTRYTYPE_X(val);
return 0;
}
EXPORT_SYMBOL(ocelot_mact_lookup);
int ocelot_mact_learn_streamdata(struct ocelot *ocelot, int dst_idx,
const unsigned char mac[ETH_ALEN],
unsigned int vid,
enum macaccess_entry_type type,
int sfid, int ssid)
{
int ret;
mutex_lock(&ocelot->mact_lock);
ocelot_write(ocelot,
(sfid < 0 ? 0 : ANA_TABLES_STREAMDATA_SFID_VALID) |
ANA_TABLES_STREAMDATA_SFID(sfid) |
(ssid < 0 ? 0 : ANA_TABLES_STREAMDATA_SSID_VALID) |
ANA_TABLES_STREAMDATA_SSID(ssid),
ANA_TABLES_STREAMDATA);
ret = __ocelot_mact_learn(ocelot, dst_idx, mac, vid, type);
mutex_unlock(&ocelot->mact_lock);
return ret;
}
EXPORT_SYMBOL(ocelot_mact_learn_streamdata);
static void ocelot_mact_init(struct ocelot *ocelot)
{
/* Configure the learning mode entries attributes:
* - Do not copy the frame to the CPU extraction queues.
* - Use the vlan and mac_copy for dmac lookup.
*/
ocelot_rmw(ocelot, 0,
ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS
| ANA_AGENCTRL_LEARN_FWD_KILL
| ANA_AGENCTRL_LEARN_IGNORE_VLAN,
ANA_AGENCTRL);
/* Clear the MAC table. We are not concurrent with anyone, so
* holding &ocelot->mact_lock is pointless.
*/
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
void ocelot_pll5_init(struct ocelot *ocelot)
{
/* Configure PLL5. This will need a proper CCF driver
* The values are coming from the VTSS API for Ocelot
*/
regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
HSIO_PLL5G_CFG0_ENA_BIAS |
HSIO_PLL5G_CFG0_ENA_VCO_BUF |
HSIO_PLL5G_CFG0_ENA_CP1 |
HSIO_PLL5G_CFG0_SELCPI(2) |
HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
HSIO_PLL5G_CFG0_SELBGV820(4) |
HSIO_PLL5G_CFG0_DIV4 |
HSIO_PLL5G_CFG0_ENA_CLKTREE |
HSIO_PLL5G_CFG0_ENA_LANE);
regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
HSIO_PLL5G_CFG2_ENA_AMPCTRL |
HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
}
EXPORT_SYMBOL(ocelot_pll5_init);
static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
{
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
ANA_PORT_VCAP_S2_CFG, port);
ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
ANA_PORT_VCAP_CFG, port);
ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
REW_PORT_CFG_ES0_EN,
REW_PORT_CFG, port);
}
static int ocelot_single_vlan_aware_bridge(struct ocelot *ocelot,
struct netlink_ext_ack *extack)
{
struct net_device *bridge = NULL;
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port || !ocelot_port->bridge ||
!br_vlan_enabled(ocelot_port->bridge))
continue;
if (!bridge) {
bridge = ocelot_port->bridge;
continue;
}
if (bridge == ocelot_port->bridge)
continue;
NL_SET_ERR_MSG_MOD(extack,
"Only one VLAN-aware bridge is supported");
return -EBUSY;
}
return 0;
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
}
static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
u32 val;
return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
ocelot,
val,
(val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
ANA_TABLES_VLANACCESS_CMD_IDLE,
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
{
/* Select the VID to configure */
ocelot_write(ocelot, ANA_TABLES_VLANTIDX_V_INDEX(vid),
ANA_TABLES_VLANTIDX);
/* Set the vlan port members mask and issue a write command */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(mask) |
ANA_TABLES_VLANACCESS_CMD_WRITE,
ANA_TABLES_VLANACCESS);
return ocelot_vlant_wait_for_completion(ocelot);
}
static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
struct ocelot_bridge_vlan *vlan;
int num_untagged = 0;
list_for_each_entry(vlan, &ocelot->vlans, list) {
if (!(vlan->portmask & BIT(port)))
continue;
/* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
* because this is never active in hardware at the same time as
* the bridge VLANs, which only matter in VLAN-aware mode.
*/
if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
continue;
if (vlan->untagged & BIT(port))
num_untagged++;
}
return num_untagged;
}
static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
{
struct ocelot_bridge_vlan *vlan;
int num_tagged = 0;
list_for_each_entry(vlan, &ocelot->vlans, list) {
if (!(vlan->portmask & BIT(port)))
continue;
if (!(vlan->untagged & BIT(port)))
num_tagged++;
}
return num_tagged;
}
/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
* _one_ egress-untagged VLAN (_the_ native VLAN)
*/
static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
{
return ocelot_port_num_tagged_vlans(ocelot, port) &&
ocelot_port_num_untagged_vlans(ocelot, port) == 1;
}
static struct ocelot_bridge_vlan *
ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
{
struct ocelot_bridge_vlan *vlan;
list_for_each_entry(vlan, &ocelot->vlans, list)
if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
return vlan;
return NULL;
}
/* Keep in sync REW_TAG_CFG_TAG_CFG and, if applicable,
* REW_PORT_VLAN_CFG_PORT_VID, with the bridge VLAN table and VLAN awareness
* state of the port.
*/
static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
enum ocelot_port_tag_config tag_cfg;
bool uses_native_vlan = false;
if (ocelot_port->vlan_aware) {
uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
if (uses_native_vlan)
tag_cfg = OCELOT_PORT_TAG_NATIVE;
else if (ocelot_port_num_untagged_vlans(ocelot, port))
tag_cfg = OCELOT_PORT_TAG_DISABLED;
else
tag_cfg = OCELOT_PORT_TAG_TRUNK;
} else {
tag_cfg = OCELOT_PORT_TAG_DISABLED;
}
ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
if (uses_native_vlan) {
struct ocelot_bridge_vlan *native_vlan;
/* Not having a native VLAN is impossible, because
* ocelot_port_num_untagged_vlans has returned 1.
* So there is no use in checking for NULL here.
*/
native_vlan = ocelot_port_find_native_vlan(ocelot, port);
ocelot_rmw_gix(ocelot,
REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
REW_PORT_VLAN_CFG, port);
}
}
int ocelot_bridge_num_find(struct ocelot *ocelot,
const struct net_device *bridge)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (ocelot_port && ocelot_port->bridge == bridge)
return ocelot_port->bridge_num;
}
return -1;
}
EXPORT_SYMBOL_GPL(ocelot_bridge_num_find);
static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
const struct net_device *bridge)
{
int bridge_num;
/* Standalone ports use VID 0 */
if (!bridge)
return 0;
bridge_num = ocelot_bridge_num_find(ocelot, bridge);
if (WARN_ON(bridge_num < 0))
return 0;
/* VLAN-unaware bridges use a reserved VID going from 4095 downwards */
return VLAN_N_VID - bridge_num - 1;
}
/* Default vlan to classify for untagged frames (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
if (ocelot_port->vlan_aware && pvid_vlan)
pvid = pvid_vlan->vid;
ocelot_rmw_gix(ocelot,
ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
ANA_PORT_VLAN_CFG_VLAN_VID_M,
ANA_PORT_VLAN_CFG, port);
/* If there's no pvid, we should drop not only untagged traffic (which
* happens automatically), but also 802.1p traffic which gets
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*/
if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
}
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
u16 vid)
{
struct ocelot_bridge_vlan *vlan;
list_for_each_entry(vlan, &ocelot->vlans, list)
if (vlan->vid == vid)
return vlan;
return NULL;
}
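/* Add the port to the member set of the given VID in hardware and in the
 * bridge VLAN list, creating the VLAN entry if it does not exist yet.
 */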
static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
bool untagged)
{
struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
unsigned long portmask;
int err;
if (vlan) {
portmask = vlan->portmask | BIT(port);
err = ocelot_vlant_set_mask(ocelot, vid, portmask);
if (err)
return err;
vlan->portmask = portmask;
/* Bridge VLANs can be overwritten with a different
* egress-tagging setting, so make sure to override an untagged
* with a tagged VID if that's going on.
*/
if (untagged)
vlan->untagged |= BIT(port);
else
vlan->untagged &= ~BIT(port);
return 0;
}
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
return -ENOMEM;
portmask = BIT(port);
err = ocelot_vlant_set_mask(ocelot, vid, portmask);
if (err) {
kfree(vlan);
return err;
}
vlan->vid = vid;
vlan->portmask = portmask;
if (untagged)
vlan->untagged = BIT(port);
INIT_LIST_HEAD(&vlan->list);
list_add_tail(&vlan->list, &ocelot->vlans);
return 0;
}
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
unsigned long portmask;
int err;
if (!vlan)
return 0;
portmask = vlan->portmask & ~BIT(port);
err = ocelot_vlant_set_mask(ocelot, vid, portmask);
if (err)
return err;
vlan->portmask = portmask;
if (vlan->portmask)
return 0;
list_del(&vlan->list);
kfree(vlan);
return 0;
}
static int ocelot_add_vlan_unaware_pvid(struct ocelot *ocelot, int port,
const struct net_device *bridge)
{
u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
return ocelot_vlan_member_add(ocelot, port, vid, true);
}
static int ocelot_del_vlan_unaware_pvid(struct ocelot *ocelot, int port,
const struct net_device *bridge)
{
u16 vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
return ocelot_vlan_member_del(ocelot, port, vid);
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
bool vlan_aware, struct netlink_ext_ack *extack)
{
struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_vcap_filter *filter;
int err = 0;
u32 val;
list_for_each_entry(filter, &block->rules, list) {
if (filter->ingress_port_mask & BIT(port) &&
filter->action.vid_replace_ena) {
NL_SET_ERR_MSG_MOD(extack,
"Cannot change VLAN state with vlan modify rules active");
return -EBUSY;
}
}
err = ocelot_single_vlan_aware_bridge(ocelot, extack);
if (err)
return err;
if (vlan_aware)
err = ocelot_del_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
else if (ocelot_port->bridge)
err = ocelot_add_vlan_unaware_pvid(ocelot, port,
ocelot_port->bridge);
if (err)
return err;
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
val = ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1);
else
val = 0;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
if (untagged) {
/* We are adding an egress-untagged VLAN */
if (ocelot_port_uses_native_vlan(ocelot, port)) {
NL_SET_ERR_MSG_MOD(extack,
"Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
return -EBUSY;
}
} else {
/* We are adding an egress-tagged VLAN */
if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
NL_SET_ERR_MSG_MOD(extack,
"Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
return -EBUSY;
}
}
if (vid > OCELOT_RSV_VLAN_RANGE_START) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN range 4000-4095 reserved for VLAN-unaware bridging");
return -EBUSY;
}
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
{
int err;
/* Ignore VID 0 added to our RX filter by the 8021q module, since
* that collides with OCELOT_STANDALONE_PVID and changes it from
* egress-untagged to egress-tagged.
*/
if (!vid)
return 0;
err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
/* Default ingress vlan classification */
if (pvid)
ocelot_port_set_pvid(ocelot, port,
ocelot_bridge_vlan_find(ocelot, vid));
/* Untagged egress vlan classification */
ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
bool del_pvid = false;
int err;
if (!vid)
return 0;
if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
del_pvid = true;
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
EXPORT_SYMBOL(ocelot_vlan_del);
static void ocelot_vlan_init(struct ocelot *ocelot)
{
unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0);
u16 port, vid;
/* Clear VLAN table, by default all ports are members of all VLANs */
ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT,
ANA_TABLES_VLANACCESS);
ocelot_vlant_wait_for_completion(ocelot);
/* Configure the port VLAN memberships */
for (vid = 1; vid < VLAN_N_VID; vid++)
ocelot_vlant_set_mask(ocelot, vid, 0);
/* We need VID 0 to get traffic on standalone ports.
* It is added automatically if the 8021q module is loaded, but we
* can't rely on that since it might not be.
*/
ocelot_vlant_set_mask(ocelot, OCELOT_STANDALONE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
*/
ocelot_write(ocelot, all_ports, ANA_VLANMASK);
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port);
ocelot_write_gix(ocelot, 0, REW_TAG_CFG, port);
}
}
static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
{
return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
}
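/* Drain the frames queued towards the port: disable dequeuing and flow
 * control, flush the egress queues, then restore the previous flow control
 * settings.
 */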
static int ocelot_port_flush(struct ocelot *ocelot, int port)
{
unsigned int pause_ena;
int err, val;
/* Disable dequeuing from the egress queues */
ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE, port);
/* Disable flow control */
ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
/* Wait at least the time it takes to receive a frame of maximum length
* at the port.
* Worst-case delays for 10 kilobyte jumbo frames are:
* 8 ms on a 10M port
* 800 μs on a 100M port
* 80 μs on a 1G port
* 32 μs on a 2.5G port
*/
usleep_range(8000, 10000);
/* Disable half duplex backpressure. */
ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
SYS_FRONT_PORT_MODE, port);
/* Flush the queues associated with the port. */
ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
REW_PORT_CFG, port);
/* Enable dequeuing from the egress queues. */
ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
port);
/* Wait until flushing is complete. */
err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
100, 2000000, false, ocelot, port);
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
/* Re-enable flow control */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
return err;
}
int ocelot_port_configure_serdes(struct ocelot *ocelot, int port,
struct device_node *portnp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct device *dev = ocelot->dev;
int err;
/* Ensure clock signals and speed are set on all QSGMII links */
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_QSGMII)
ocelot_port_rmwl(ocelot_port, 0,
DEV_CLOCK_CFG_MAC_TX_RST |
DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
if (ocelot_port->phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
struct phy *serdes = of_phy_get(portnp, NULL);
if (IS_ERR(serdes)) {
err = PTR_ERR(serdes);
dev_err_probe(dev, err,
"missing SerDes phys for port %d\n",
port);
return err;
}
err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
ocelot_port->phy_mode);
of_phy_put(serdes);
if (err) {
dev_err(dev, "Could not SerDes mode on port %d: %pe\n",
port, ERR_PTR(err));
return err;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_configure_serdes);
void ocelot_phylink_mac_config(struct ocelot *ocelot, int port,
unsigned int link_an_mode,
const struct phylink_link_state *state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
/* Disable HDX fast control */
ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS,
DEV_PORT_MISC);
/* SGMII only for now */
ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA,
PCS1G_MODE_CFG);
ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG);
/* Enable PCS */
ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG);
/* No aneg on SGMII */
ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG);
/* No loopback */
ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_config);
void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port,
unsigned int link_an_mode,
phy_interface_t interface,
unsigned long quirks)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
ocelot_port->speed = SPEED_UNKNOWN;
ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
DEV_MAC_ENA_CFG);
if (ocelot->ops->cut_through_fwd) {
mutex_lock(&ocelot->fwd_domain_lock);
ocelot->ops->cut_through_fwd(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
err = ocelot_port_flush(ocelot, port);
if (err)
dev_err(ocelot->dev, "failed to flush port %d: %d\n",
port, err);
/* Put the port in reset. */
if (interface != PHY_INTERFACE_MODE_QSGMII ||
!(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP))
ocelot_port_rmwl(ocelot_port,
DEV_CLOCK_CFG_MAC_TX_RST |
DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG_MAC_TX_RST |
DEV_CLOCK_CFG_MAC_RX_RST,
DEV_CLOCK_CFG);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down);
void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause,
unsigned long quirks)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int mac_speed, mode = 0;
u32 mac_fc_cfg;
ocelot_port->speed = speed;
/* The MAC might be integrated in systems where the MAC speed is fixed
* and it's the PCS that is performing the rate adaptation, so we have
* to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG
* (which is also its default value).
*/
if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) ||
speed == SPEED_1000) {
mac_speed = OCELOT_SPEED_1000;
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
} else if (speed == SPEED_2500) {
mac_speed = OCELOT_SPEED_2500;
mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA;
} else if (speed == SPEED_100) {
mac_speed = OCELOT_SPEED_100;
} else {
mac_speed = OCELOT_SPEED_10;
}
if (duplex == DUPLEX_FULL)
mode |= DEV_MAC_MODE_CFG_FDX_ENA;
ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG);
/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
* PORT_RST bits in DEV_CLOCK_CFG.
*/
ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed),
DEV_CLOCK_CFG);
switch (speed) {
case SPEED_10:
mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10);
break;
case SPEED_100:
mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100);
break;
case SPEED_1000:
case SPEED_2500:
mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000);
break;
default:
dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
port, speed);
return;
}
if (rx_pause)
mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
if (tx_pause)
mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
/* Flow control. Link speed is only used here to evaluate the time
* specification in incoming pause frames.
*/
ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
/* Don't attempt to send PAUSE frames on the NPI port, it's broken */
if (port != ocelot->npi)
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
tx_pause);
/* Undo the effects of ocelot_phylink_mac_link_down:
* enable MAC module
*/
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
/* If the port supports cut-through forwarding, update the masks before
* enabling forwarding on the port.
*/
if (ocelot->ops->cut_through_fwd) {
mutex_lock(&ocelot->fwd_domain_lock);
/* Workaround for hardware bug - FP doesn't work
* at all link speeds for all PHY modes. The function
* below also calls ocelot->ops->cut_through_fwd(),
* so we don't need to do it twice.
*/
ocelot_port_update_active_preemptible_tcs(ocelot, port);
mutex_unlock(&ocelot->fwd_domain_lock);
}
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
}
EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up);
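/* Read one 32-bit word from the extraction group, handling the NOT_READY,
 * ESCAPE and EOF indications. Returns the number of valid bytes read or a
 * negative error code.
 */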
static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
u32 *rval)
{
u32 bytes_valid, val;
val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
if (val == XTR_NOT_READY) {
if (ifh)
return -EIO;
do {
val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
} while (val == XTR_NOT_READY);
}
switch (val) {
case XTR_ABORT:
return -EIO;
case XTR_EOF_0:
case XTR_EOF_1:
case XTR_EOF_2:
case XTR_EOF_3:
case XTR_PRUNED:
bytes_valid = XTR_VALID_BYTES(val);
val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
if (val == XTR_ESCAPE)
*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
else
*rval = val;
return bytes_valid;
case XTR_ESCAPE:
*rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
return 4;
default:
*rval = val;
return 4;
}
}
static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
{
int i, err = 0;
for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);
if (err != 4)
return (err < 0) ? err : -EIO;
}
return 0;
}
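/* Reconstruct a full 64-bit RX timestamp from the 32-bit value found in the
 * extraction header and the current PTP time, handling a possible wraparound
 * of the lower 32 bits.
 */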
void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
u64 timestamp)
{
struct skb_shared_hwtstamps *shhwtstamps;
u64 tod_in_ns, full_ts_in_ns;
struct timespec64 ts;
ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
if ((tod_in_ns & 0xffffffff) < timestamp)
full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
timestamp;
else
full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
timestamp;
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamps->hwtstamp = full_ts_in_ns;
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
u64 timestamp, src_port, len;
u32 xfh[OCELOT_TAG_LEN / 4];
struct net_device *dev;
struct sk_buff *skb;
int sz, buf_len;
u32 val, *buf;
int err;
err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
if (err)
return err;
ocelot_xfh_get_src_port(xfh, &src_port);
ocelot_xfh_get_len(xfh, &len);
ocelot_xfh_get_rew_val(xfh, &timestamp);
if (WARN_ON(src_port >= ocelot->num_phys_ports))
return -EINVAL;
dev = ocelot->ops->port_to_netdev(ocelot, src_port);
if (!dev)
return -EINVAL;
skb = netdev_alloc_skb(dev, len);
if (unlikely(!skb)) {
netdev_err(dev, "Unable to allocate sk_buff\n");
return -ENOMEM;
}
buf_len = len - ETH_FCS_LEN;
buf = (u32 *)skb_put(skb, buf_len);
len = 0;
do {
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
if (sz < 0) {
err = sz;
goto out_free_skb;
}
*buf++ = val;
len += sz;
} while (len < buf_len);
/* Read the FCS */
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
if (sz < 0) {
err = sz;
goto out_free_skb;
}
/* Update the statistics if part of the FCS was read before */
len -= ETH_FCS_LEN - sz;
if (unlikely(dev->features & NETIF_F_RXFCS)) {
buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
*buf = val;
}
if (ocelot->ptp)
ocelot_ptp_rx_timestamp(ocelot, skb, timestamp);
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
*/
if (ocelot->ports[src_port]->bridge)
skb->offload_fwd_mark = 1;
skb->protocol = eth_type_trans(skb, dev);
*nskb = skb;
return 0;
out_free_skb:
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL(ocelot_xtr_poll_frame);
bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
return false;
if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
return false;
return true;
}
EXPORT_SYMBOL(ocelot_can_inject);
void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
{
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
if (vlan_tag)
ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
if (rew_op)
ocelot_ifh_set_rew_op(ifh, rew_op);
}
EXPORT_SYMBOL(ocelot_ifh_port_set);
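/* Inject the skb on the given port using register-based injection: write the
 * IFH followed by the frame data, padding and a dummy FCS to the injection
 * group.
 */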
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
u32 ifh[OCELOT_TAG_LEN / 4] = {0};
unsigned int i, count, last;
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
count = DIV_ROUND_UP(skb->len, 4);
last = skb->len % 4;
for (i = 0; i < count; i++)
ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
/* Add padding */
while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
i++;
}
/* Indicate EOF and valid bytes in last word */
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
QS_INJ_CTRL_EOF,
QS_INJ_CTRL, grp);
/* Add dummy CRC */
ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
skb_tx_timestamp(skb);
skb->dev->stats.tx_packets++;
skb->dev->stats.tx_bytes += skb->len;
}
EXPORT_SYMBOL(ocelot_port_inject_frame);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
ocelot_read_rix(ocelot, QS_XTR_RD, grp);
}
EXPORT_SYMBOL(ocelot_drain_cpu_queue);
int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr,
u16 vid, const struct net_device *bridge)
{
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr,
u16 vid, const struct net_device *bridge)
{
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
return ocelot_mact_forget(ocelot, addr, vid);
}
EXPORT_SYMBOL(ocelot_fdb_del);
/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
{
u32 val, dst, macl, mach;
char mac[ETH_ALEN];
/* Set row and column to read from */
ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
/* Issue a read command */
ocelot_write(ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
ANA_TABLES_MACACCESS);
if (ocelot_mact_wait_for_completion(ocelot))
return -ETIMEDOUT;
/* Read the entry flags */
val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
if (!(val & ANA_TABLES_MACACCESS_VALID))
return -EINVAL;
/* If the entry read has another port configured as its destination,
* do not report it.
*/
dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
if (dst != port)
return -EINVAL;
/* Get the entry's MAC address and VLAN id */
macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
mac[0] = (mach >> 8) & 0xff;
mac[1] = (mach >> 0) & 0xff;
mac[2] = (macl >> 24) & 0xff;
mac[3] = (macl >> 16) & 0xff;
mac[4] = (macl >> 8) & 0xff;
mac[5] = (macl >> 0) & 0xff;
entry->vid = (mach >> 16) & 0xfff;
ether_addr_copy(entry->mac, mac);
return 0;
}
int ocelot_mact_flush(struct ocelot *ocelot, int port)
{
int err;
mutex_lock(&ocelot->mact_lock);
/* Program ageing filter for a single port */
ocelot_write(ocelot, ANA_ANAGEFIL_PID_EN | ANA_ANAGEFIL_PID_VAL(port),
ANA_ANAGEFIL);
/* Flushing dynamic FDB entries requires two successive age scans */
ocelot_write(ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
ANA_TABLES_MACACCESS);
err = ocelot_mact_wait_for_completion(ocelot);
if (err) {
mutex_unlock(&ocelot->mact_lock);
return err;
}
/* And second... */
ocelot_write(ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_AGE),
ANA_TABLES_MACACCESS);
err = ocelot_mact_wait_for_completion(ocelot);
/* Restore ageing filter */
ocelot_write(ocelot, 0, ANA_ANAGEFIL);
mutex_unlock(&ocelot->mact_lock);
return err;
}
EXPORT_SYMBOL_GPL(ocelot_mact_flush);
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
int err = 0;
int i, j;
/* We could take the lock just around ocelot_mact_read, but doing so
* thousands of times in a row seems rather pointless and inefficient.
*/
mutex_lock(&ocelot->mact_lock);
/* Loop through all the mac tables entries. */
for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry;
bool is_static;
err = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...),
* skip it.
*/
if (err == -EINVAL)
continue;
else if (err)
break;
is_static = (entry.type == ENTRYTYPE_LOCKED);
/* Hide the reserved VLANs used for
* VLAN-unaware bridging.
*/
if (entry.vid > OCELOT_RSV_VLAN_RANGE_START)
entry.vid = 0;
err = cb(entry.mac, entry.vid, is_static, data);
if (err)
break;
}
}
mutex_unlock(&ocelot->mact_lock);
return err;
}
EXPORT_SYMBOL(ocelot_fdb_dump);
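/* Install, or extend to the given port, a VCAP IS2 trap identified by the
 * cookie, which redirects matching packets to the CPU instead of forwarding
 * them.
 */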
int ocelot_trap_add(struct ocelot *ocelot, int port,
unsigned long cookie, bool take_ts,
void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
bool new = false;
int err;
block_vcap_is2 = &ocelot->block[VCAP_IS2];
trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
false);
if (!trap) {
trap = kzalloc(sizeof(*trap), GFP_KERNEL);
if (!trap)
return -ENOMEM;
populate(trap);
trap->prio = 1;
trap->id.cookie = cookie;
trap->id.tc_offload = false;
trap->block_id = VCAP_IS2;
trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
trap->lookup = 0;
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
trap->is_trap = true;
new = true;
}
trap->ingress_port_mask |= BIT(port);
if (new)
err = ocelot_vcap_filter_add(ocelot, trap, NULL);
else
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask)
kfree(trap);
return err;
}
return 0;
}
int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
block_vcap_is2 = &ocelot->block[VCAP_IS2];
trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
false);
if (!trap)
return 0;
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask)
return ocelot_vcap_filter_del(ocelot, trap);
return ocelot_vcap_filter_replace(ocelot, trap);
}
static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond)
{
u32 mask = 0;
int port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
if (ocelot_port->bond == bond)
mask |= BIT(port);
}
return mask;
}
/* The logical port number of a LAG is equal to the lowest numbered physical
* port ID present in that LAG. It may change if that port ever leaves the LAG.
*/
int ocelot_bond_get_id(struct ocelot *ocelot, struct net_device *bond)
{
int bond_mask = ocelot_get_bond_mask(ocelot, bond);
if (!bond_mask)
return -ENOENT;
return __ffs(bond_mask);
}
EXPORT_SYMBOL_GPL(ocelot_bond_get_id);
/* Returns the mask of user ports assigned to this DSA tag_8021q CPU port.
* Note that when CPU ports are in a LAG, the user ports are assigned to the
* 'primary' CPU port, the one whose physical port number gives the logical
* port number of the LAG.
*
* We leave PGID_SRC poorly configured for the 'secondary' CPU port in the LAG
* (to which no user port is assigned), but it appears that forwarding from
* this secondary CPU port looks at the PGID_SRC associated with the logical
* port ID that it's assigned to, which *is* configured properly.
*/
static u32 ocelot_dsa_8021q_cpu_assigned_ports(struct ocelot *ocelot,
struct ocelot_port *cpu)
{
u32 mask = 0;
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
if (ocelot_port->dsa_8021q_cpu == cpu)
mask |= BIT(port);
}
if (cpu->bond)
mask &= ~ocelot_get_bond_mask(ocelot, cpu->bond);
return mask;
}
/* Returns the DSA tag_8021q CPU port that the given port is assigned to,
* or the bit mask of CPU ports if said CPU port is in a LAG.
*/
u32 ocelot_port_assigned_dsa_8021q_cpu_mask(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_port *cpu_port = ocelot_port->dsa_8021q_cpu;
if (!cpu_port)
return 0;
if (cpu_port->bond)
return ocelot_get_bond_mask(ocelot, cpu_port->bond);
return BIT(cpu_port->index);
}
EXPORT_SYMBOL_GPL(ocelot_port_assigned_dsa_8021q_cpu_mask);
u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port)
{
struct ocelot_port *ocelot_port = ocelot->ports[src_port];
const struct net_device *bridge;
u32 mask = 0;
int port;
if (!ocelot_port || ocelot_port->stp_state != BR_STATE_FORWARDING)
return 0;
bridge = ocelot_port->bridge;
if (!bridge)
return 0;
for (port = 0; port < ocelot->num_phys_ports; port++) {
ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
if (ocelot_port->stp_state == BR_STATE_FORWARDING &&
ocelot_port->bridge == bridge)
mask |= BIT(port);
}
return mask;
}
EXPORT_SYMBOL_GPL(ocelot_get_bridge_fwd_mask);
static void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot, bool joining)
{
int port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
/* If cut-through forwarding is supported, update the masks before a
* port joins the forwarding domain, to avoid potential underruns if it
* has the highest speed from the new domain.
*/
if (joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
/* Apply FWD mask. The loop is needed to add/remove the current port as
* a source for the other ports.
*/
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
unsigned long mask;
if (!ocelot_port) {
/* Unused ports can't send anywhere */
mask = 0;
} else if (ocelot_port->is_dsa_8021q_cpu) {
/* The DSA tag_8021q CPU ports need to be able to
* forward packets to all ports assigned to them.
*/
mask = ocelot_dsa_8021q_cpu_assigned_ports(ocelot,
ocelot_port);
} else if (ocelot_port->bridge) {
struct net_device *bond = ocelot_port->bond;
mask = ocelot_get_bridge_fwd_mask(ocelot, port);
mask &= ~BIT(port);
mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
port);
if (bond)
mask &= ~ocelot_get_bond_mask(ocelot, bond);
} else {
/* Standalone ports forward only to DSA tag_8021q CPU
* ports (if those exist), or to the hardware CPU port
* module otherwise.
*/
mask = ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
port);
}
ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
}
/* If cut-through forwarding is supported and a port is leaving, there
* is a chance that cut-through was disabled on the other ports due to
* the port which is leaving (it has a higher link speed). We need to
* update the cut-through masks of the remaining ports no earlier than
* after the port has left, to prevent underruns from happening between
* the cut-through update and the forwarding domain update.
*/
if (!joining && ocelot->ops->cut_through_fwd)
ocelot->ops->cut_through_fwd(ocelot);
}
/* Update PGID_CPU which is the destination port mask used for whitelisting
* unicast addresses filtered towards the host. In the normal and NPI modes,
* this points to the analyzer entry for the CPU port module, while in DSA
* tag_8021q mode, it is a bit mask of all active CPU ports.
* PGID_SRC will take care of forwarding a packet from one user port to
* no more than a single CPU port.
*/
static void ocelot_update_pgid_cpu(struct ocelot *ocelot)
{
int pgid_cpu = 0;
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port || !ocelot_port->is_dsa_8021q_cpu)
continue;
pgid_cpu |= BIT(port);
}
if (!pgid_cpu)
pgid_cpu = BIT(ocelot->num_phys_ports);
ocelot_write_rix(ocelot, pgid_cpu, ANA_PGID_PGID, PGID_CPU);
}
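/* Mark @cpu as a DSA tag_8021q CPU port: make it a member of every VLAN in
 * the range reserved by the driver (so it can receive traffic tagged with
 * those VLANs) and refresh PGID_CPU so host-directed traffic is copied to it.
 */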
void ocelot_port_setup_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
mutex_lock(&ocelot->fwd_domain_lock);
cpu_port->is_dsa_8021q_cpu = true;
for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
ocelot_vlan_member_add(ocelot, cpu, vid, true);
ocelot_update_pgid_cpu(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_setup_dsa_8021q_cpu);
void ocelot_port_teardown_dsa_8021q_cpu(struct ocelot *ocelot, int cpu)
{
struct ocelot_port *cpu_port = ocelot->ports[cpu];
u16 vid;
mutex_lock(&ocelot->fwd_domain_lock);
cpu_port->is_dsa_8021q_cpu = false;
for (vid = OCELOT_RSV_VLAN_RANGE_START; vid < VLAN_N_VID; vid++)
ocelot_vlan_member_del(ocelot, cpu_port->index, vid);
ocelot_update_pgid_cpu(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_teardown_dsa_8021q_cpu);
void ocelot_port_assign_dsa_8021q_cpu(struct ocelot *ocelot, int port,
int cpu)
{
struct ocelot_port *cpu_port = ocelot->ports[cpu];
mutex_lock(&ocelot->fwd_domain_lock);
ocelot->ports[port]->dsa_8021q_cpu = cpu_port;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_assign_dsa_8021q_cpu);
void ocelot_port_unassign_dsa_8021q_cpu(struct ocelot *ocelot, int port)
{
mutex_lock(&ocelot->fwd_domain_lock);
ocelot->ports[port]->dsa_8021q_cpu = NULL;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL_GPL(ocelot_port_unassign_dsa_8021q_cpu);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 learn_ena = 0;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->stp_state = state;
if ((state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) &&
ocelot_port->learn_ena)
learn_ena = ANA_PORT_PORT_CFG_LEARN_ENA;
ocelot_rmw_gix(ocelot, learn_ena, ANA_PORT_PORT_CFG_LEARN_ENA,
ANA_PORT_PORT_CFG, port);
ocelot_apply_bridge_fwd_mask(ocelot, state == BR_STATE_FORWARDING);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
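/* Program the FDB ageing interval. The hardware ages an entry out after
 * 2 * AGE_PERIOD seconds, so e.g. the bridge default of 300000 ms programs
 * AGE_PERIOD = 150.
 */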
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
{
unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
/* Setting AGE_PERIOD to zero effectively disables automatic aging,
* which is clearly not what our intention is. So avoid that.
*/
if (!age_period)
age_period = 1;
ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
}
EXPORT_SYMBOL(ocelot_set_ageing_time);
static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
const unsigned char *addr,
u16 vid)
{
struct ocelot_multicast *mc;
list_for_each_entry(mc, &ocelot->multicast, list) {
if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
return mc;
}
return NULL;
}
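/* Classify an MDB address: 01:00:5e:xx:xx:xx is the IPv4 multicast MAC
 * prefix and 33:33:xx:xx:xx:xx the IPv6 one; anything else is installed as a
 * regular locked entry which goes through a destination PGID.
 */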
static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
{
if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
return ENTRYTYPE_MACv4;
if (addr[0] == 0x33 && addr[1] == 0x33)
return ENTRYTYPE_MACv6;
return ENTRYTYPE_LOCKED;
}
static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
unsigned long ports)
{
struct ocelot_pgid *pgid;
pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
if (!pgid)
return ERR_PTR(-ENOMEM);
pgid->ports = ports;
pgid->index = index;
refcount_set(&pgid->refcount, 1);
list_add_tail(&pgid->list, &ocelot->pgids);
return pgid;
}
static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
{
if (!refcount_dec_and_test(&pgid->refcount))
return;
list_del(&pgid->list);
kfree(pgid);
}
static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
const struct ocelot_multicast *mc)
{
struct ocelot_pgid *pgid;
int index;
/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
* 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
* destination mask table (PGID), the destination set is programmed as
* part of the entry MAC address.", and the DEST_IDX is set to 0.
*/
if (mc->entry_type == ENTRYTYPE_MACv4 ||
mc->entry_type == ENTRYTYPE_MACv6)
return ocelot_pgid_alloc(ocelot, 0, mc->ports);
list_for_each_entry(pgid, &ocelot->pgids, list) {
/* When searching for a nonreserved multicast PGID, ignore the
* dummy PGID of zero that we have for MACv4/MACv6 entries
*/
if (pgid->index && pgid->ports == mc->ports) {
refcount_inc(&pgid->refcount);
return pgid;
}
}
/* Search for a free index in the nonreserved multicast PGID area */
for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
bool used = false;
list_for_each_entry(pgid, &ocelot->pgids, list) {
if (pgid->index == index) {
used = true;
break;
}
}
if (!used)
return ocelot_pgid_alloc(ocelot, index, mc->ports);
}
return ERR_PTR(-ENOSPC);
}
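/* For MACv4/MACv6 entries, the destination port set is encoded directly into
 * the MAC address written to the MAC table (see the datasheet note above).
 * For example, a MACv4 group 01:00:5e:11:22:33 with a port mask of 0x005 is
 * installed as 00:00:05:11:22:33.
 */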
static void ocelot_encode_ports_to_mdb(unsigned char *addr,
struct ocelot_multicast *mc)
{
ether_addr_copy(addr, mc->addr);
if (mc->entry_type == ENTRYTYPE_MACv4) {
addr[0] = 0;
addr[1] = mc->ports >> 8;
addr[2] = mc->ports & 0xff;
} else if (mc->entry_type == ENTRYTYPE_MACv6) {
addr[0] = mc->ports >> 8;
addr[1] = mc->ports & 0xff;
}
}
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb,
const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
/* New entry */
mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
mc->entry_type = ocelot_classify_mdb(mdb->addr);
ether_addr_copy(mc->addr, mdb->addr);
mc->vid = vid;
list_add_tail(&mc->list, &ocelot->multicast);
} else {
/* Existing entry. Clean up the current port mask from
* hardware now, because we'll be modifying it.
*/
ocelot_pgid_free(ocelot, mc->pgid);
ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
}
mc->ports |= BIT(port);
pgid = ocelot_mdb_get_pgid(ocelot, mc);
if (IS_ERR(pgid)) {
dev_err(ocelot->dev,
"Cannot allocate PGID for mdb %pM vid %d\n",
mc->addr, mc->vid);
devm_kfree(ocelot->dev, mc);
return PTR_ERR(pgid);
}
mc->pgid = pgid;
ocelot_encode_ports_to_mdb(addr, mc);
if (mc->entry_type != ENTRYTYPE_MACv4 &&
mc->entry_type != ENTRYTYPE_MACv6)
ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
pgid->index);
return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb,
const struct net_device *bridge)
{
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
return -ENOENT;
ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
ocelot_pgid_free(ocelot, mc->pgid);
mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
devm_kfree(ocelot->dev, mc);
return 0;
}
/* We have a PGID with fewer ports now */
pgid = ocelot_mdb_get_pgid(ocelot, mc);
if (IS_ERR(pgid))
return PTR_ERR(pgid);
mc->pgid = pgid;
ocelot_encode_ports_to_mdb(addr, mc);
if (mc->entry_type != ENTRYTYPE_MACv4 &&
mc->entry_type != ENTRYTYPE_MACv6)
ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
pgid->index);
return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
struct net_device *bridge, int bridge_num,
struct netlink_ext_ack *extack)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;
err = ocelot_single_vlan_aware_bridge(ocelot, extack);
if (err)
return err;
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->bridge = bridge;
ocelot_port->bridge_num = bridge_num;
ocelot_apply_bridge_fwd_mask(ocelot, true);
mutex_unlock(&ocelot->fwd_domain_lock);
if (br_vlan_enabled(bridge))
return 0;
return ocelot_add_vlan_unaware_pvid(ocelot, port, bridge);
}
EXPORT_SYMBOL(ocelot_port_bridge_join);
void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
mutex_lock(&ocelot->fwd_domain_lock);
if (!br_vlan_enabled(bridge))
ocelot_del_vlan_unaware_pvid(ocelot, port, bridge);
ocelot_port->bridge = NULL;
ocelot_port->bridge_num = -1;
ocelot_port_set_pvid(ocelot, port, NULL);
ocelot_port_manage_port_tag(ocelot, port);
ocelot_apply_bridge_fwd_mask(ocelot, false);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
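/* Program the destination and aggregation PGIDs for LAG offload: every port
 * in a bond uses the full bond mask as its unicast destination, and the
 * aggregation codes are distributed round-robin among the bond's tx-active
 * ports so that each hashed flow egresses exactly one member port.
 */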
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
int i, port, lag;
/* Reset destination and aggregation PGIDS */
for_each_unicast_dest_pgid(ocelot, port)
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
for_each_aggr_pgid(ocelot, i)
ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
ANA_PGID_PGID, i);
/* The visited ports bitmask holds the list of ports offloading any
* bonding interface. Initially we mark all these ports as unvisited,
* then every time we visit a port in this bitmask, we know that it is
* the lowest numbered port, i.e. the one whose logical ID == physical
* port ID == LAG ID. So we mark as visited all further ports in the
* bitmask that are offloading the same bonding interface. This way,
* we set up the aggregation PGIDs only once per bonding interface.
*/
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port || !ocelot_port->bond)
continue;
visited &= ~BIT(port);
}
/* Now, set PGIDs for each active LAG */
for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
struct net_device *bond = ocelot->ports[lag]->bond;
int num_active_ports = 0;
unsigned long bond_mask;
u8 aggr_idx[16];
if (!bond || (visited & BIT(lag)))
continue;
bond_mask = ocelot_get_bond_mask(ocelot, bond);
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
			/* Destination mask */
ocelot_write_rix(ocelot, bond_mask,
ANA_PGID_PGID, port);
if (ocelot_port->lag_tx_active)
aggr_idx[num_active_ports++] = port;
}
for_each_aggr_pgid(ocelot, i) {
u32 ac;
ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
ac &= ~bond_mask;
/* Don't do division by zero if there was no active
* port. Just make all aggregation codes zero.
*/
if (num_active_ports)
ac |= BIT(aggr_idx[i % num_active_ports]);
ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
}
/* Mark all ports in the same LAG as visited to avoid applying
* the same config again.
*/
for (port = lag; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
if (ocelot_port->bond == bond)
visited |= BIT(port);
}
}
}
/* When offloading a bonding interface, the switch ports configured under the
* same bond must have the same logical port ID, equal to the physical port ID
* of the lowest numbered physical port in that bond. Otherwise, in standalone/
* bridged mode, each port has a logical port ID equal to its physical port ID.
*/
static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct net_device *bond;
if (!ocelot_port)
continue;
bond = ocelot_port->bond;
if (bond) {
int lag = ocelot_bond_get_id(ocelot, bond);
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(lag),
ANA_PORT_PORT_CFG_PORTID_VAL_M,
ANA_PORT_PORT_CFG, port);
} else {
ocelot_rmw_gix(ocelot,
ANA_PORT_PORT_CFG_PORTID_VAL(port),
ANA_PORT_PORT_CFG_PORTID_VAL_M,
ANA_PORT_PORT_CFG, port);
}
}
}
static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc,
unsigned long from_mask, unsigned long to_mask)
{
unsigned char addr[ETH_ALEN];
struct ocelot_pgid *pgid;
u16 vid = mc->vid;
dev_dbg(ocelot->dev,
"Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n",
mc->addr, mc->vid, from_mask, to_mask);
/* First clean up the current port mask from hardware, because
* we'll be modifying it.
*/
ocelot_pgid_free(ocelot, mc->pgid);
ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
mc->ports &= ~from_mask;
mc->ports |= to_mask;
pgid = ocelot_mdb_get_pgid(ocelot, mc);
if (IS_ERR(pgid)) {
dev_err(ocelot->dev,
"Cannot allocate PGID for mdb %pM vid %d\n",
mc->addr, mc->vid);
devm_kfree(ocelot->dev, mc);
return PTR_ERR(pgid);
}
mc->pgid = pgid;
ocelot_encode_ports_to_mdb(addr, mc);
if (mc->entry_type != ENTRYTYPE_MACv4 &&
mc->entry_type != ENTRYTYPE_MACv6)
ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
pgid->index);
return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
mc->entry_type);
}
int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask,
unsigned long to_mask)
{
struct ocelot_multicast *mc;
int err;
list_for_each_entry(mc, &ocelot->multicast, list) {
if (!(mc->ports & from_mask))
continue;
err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask);
if (err)
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs);
/* Documentation for PORTID_VAL says:
* Logical port number for front port. If port is not a member of a LLAG,
* then PORTID must be set to the physical port number.
* If port is a member of a LLAG, then PORTID must be set to the common
* PORTID_VAL used for all member ports of the LLAG.
* The value must not exceed the number of physical ports on the device.
*
* This means we have little choice but to migrate FDB entries pointing towards
* a logical port when that changes.
*/
static void ocelot_migrate_lag_fdbs(struct ocelot *ocelot,
struct net_device *bond,
int lag)
{
struct ocelot_lag_fdb *fdb;
int err;
lockdep_assert_held(&ocelot->fwd_domain_lock);
list_for_each_entry(fdb, &ocelot->lag_fdbs, list) {
if (fdb->bond != bond)
continue;
err = ocelot_mact_forget(ocelot, fdb->addr, fdb->vid);
if (err) {
dev_err(ocelot->dev,
"failed to delete LAG %s FDB %pM vid %d: %pe\n",
bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
}
err = ocelot_mact_learn(ocelot, lag, fdb->addr, fdb->vid,
ENTRYTYPE_LOCKED);
if (err) {
dev_err(ocelot->dev,
"failed to migrate LAG %s FDB %pM vid %d: %pe\n",
bond->name, fdb->addr, fdb->vid, ERR_PTR(err));
}
}
}
int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
NL_SET_ERR_MSG_MOD(extack,
"Can only offload LAG using hash TX type");
return -EOPNOTSUPP;
}
mutex_lock(&ocelot->fwd_domain_lock);
ocelot->ports[port]->bond = bond;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, true);
ocelot_set_aggr_pgids(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
int old_lag_id, new_lag_id;
mutex_lock(&ocelot->fwd_domain_lock);
old_lag_id = ocelot_bond_get_id(ocelot, bond);
ocelot->ports[port]->bond = NULL;
ocelot_setup_logical_port_ids(ocelot);
ocelot_apply_bridge_fwd_mask(ocelot, false);
ocelot_set_aggr_pgids(ocelot);
new_lag_id = ocelot_bond_get_id(ocelot, bond);
if (new_lag_id >= 0 && old_lag_id != new_lag_id)
ocelot_migrate_lag_fdbs(ocelot, bond, new_lag_id);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_leave);
void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
mutex_lock(&ocelot->fwd_domain_lock);
ocelot_port->lag_tx_active = lag_tx_active;
/* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
mutex_unlock(&ocelot->fwd_domain_lock);
}
EXPORT_SYMBOL(ocelot_port_lag_change);
int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid,
const struct net_device *bridge)
{
struct ocelot_lag_fdb *fdb;
int lag, err;
fdb = kzalloc(sizeof(*fdb), GFP_KERNEL);
if (!fdb)
return -ENOMEM;
mutex_lock(&ocelot->fwd_domain_lock);
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
ether_addr_copy(fdb->addr, addr);
fdb->vid = vid;
fdb->bond = bond;
lag = ocelot_bond_get_id(ocelot, bond);
err = ocelot_mact_learn(ocelot, lag, addr, vid, ENTRYTYPE_LOCKED);
if (err) {
mutex_unlock(&ocelot->fwd_domain_lock);
kfree(fdb);
return err;
}
list_add_tail(&fdb->list, &ocelot->lag_fdbs);
mutex_unlock(&ocelot->fwd_domain_lock);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_add);
int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond,
const unsigned char *addr, u16 vid,
const struct net_device *bridge)
{
struct ocelot_lag_fdb *fdb, *tmp;
mutex_lock(&ocelot->fwd_domain_lock);
if (!vid)
vid = ocelot_vlan_unaware_pvid(ocelot, bridge);
list_for_each_entry_safe(fdb, tmp, &ocelot->lag_fdbs, list) {
if (!ether_addr_equal(fdb->addr, addr) || fdb->vid != vid ||
fdb->bond != bond)
continue;
ocelot_mact_forget(ocelot, addr, vid);
list_del(&fdb->list);
mutex_unlock(&ocelot->fwd_domain_lock);
kfree(fdb);
return 0;
}
mutex_unlock(&ocelot->fwd_domain_lock);
return -ENOENT;
}
EXPORT_SYMBOL_GPL(ocelot_lag_fdb_del);
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
* In the special case that it's the NPI port that we're configuring, the
* length of the tag and optional prefix needs to be accounted for privately,
* in order to be able to sustain communication at the requested @sdu.
*/
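/* The pause and tail-drop thresholds derived below give some hysteresis:
 * pause frames start being emitted when a port consumes more than roughly 6
 * maximum-sized frames worth of buffer cells and stop below roughly 4, while
 * tail dropping starts at about 9 maximum-sized frames per port.
 */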
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
int pause_start, pause_stop;
int atop, atop_tot;
if (port == ocelot->npi) {
maxlen += OCELOT_TAG_LEN;
if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
maxlen += OCELOT_SHORT_PREFIX_LEN;
else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
maxlen += OCELOT_LONG_PREFIX_LEN;
}
ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
/* Set Pause watermark hysteresis */
pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
pause_start);
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
pause_stop);
/* Tail dropping watermarks */
atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG);
}
EXPORT_SYMBOL(ocelot_port_set_maxlen);
int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
{
int max_mtu = 65535 - ETH_HLEN - ETH_FCS_LEN;
if (port == ocelot->npi) {
max_mtu -= OCELOT_TAG_LEN;
if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
max_mtu -= OCELOT_SHORT_PREFIX_LEN;
else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
max_mtu -= OCELOT_LONG_PREFIX_LEN;
}
return max_mtu;
}
EXPORT_SYMBOL(ocelot_get_max_mtu);
static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
bool enabled)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val = 0;
if (enabled)
val = ANA_PORT_PORT_CFG_LEARN_ENA;
ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
ANA_PORT_PORT_CFG, port);
ocelot_port->learn_ena = enabled;
}
static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
bool enabled)
{
u32 val = 0;
if (enabled)
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
}
static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
bool enabled)
{
u32 val = 0;
if (enabled)
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
}
static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
bool enabled)
{
u32 val = 0;
if (enabled)
val = BIT(port);
ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
}
int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags flags)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
BR_BCAST_FLOOD))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
struct switchdev_brport_flags flags)
{
if (flags.mask & BR_LEARNING)
ocelot_port_set_learning(ocelot, port,
!!(flags.val & BR_LEARNING));
if (flags.mask & BR_FLOOD)
ocelot_port_set_ucast_flood(ocelot, port,
!!(flags.val & BR_FLOOD));
if (flags.mask & BR_MCAST_FLOOD)
ocelot_port_set_mcast_flood(ocelot, port,
!!(flags.val & BR_MCAST_FLOOD));
if (flags.mask & BR_BCAST_FLOOD)
ocelot_port_set_bcast_flood(ocelot, port,
!!(flags.val & BR_BCAST_FLOOD));
}
EXPORT_SYMBOL(ocelot_port_bridge_flags);
int ocelot_port_get_default_prio(struct ocelot *ocelot, int port)
{
int val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
return ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
}
EXPORT_SYMBOL_GPL(ocelot_port_get_default_prio);
int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
{
if (prio >= OCELOT_NUM_TC)
return -ERANGE;
ocelot_rmw_gix(ocelot,
ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(prio),
ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M,
ANA_PORT_QOS_CFG,
port);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp)
{
int qos_cfg = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
/* Return error if DSCP prioritization isn't enabled */
if (!(qos_cfg & ANA_PORT_QOS_CFG_QOS_DSCP_ENA))
return -EOPNOTSUPP;
if (qos_cfg & ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA) {
dscp = ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(dscp_cfg);
/* Re-read ANA_DSCP_CFG for the translated DSCP */
dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
}
/* If the DSCP value is not trusted, the QoS classification falls back
* to VLAN PCP or port-based default.
*/
if (!(dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA))
return -EOPNOTSUPP;
return ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg);
}
EXPORT_SYMBOL_GPL(ocelot_port_get_dscp_prio);
int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
{
int mask, val;
if (prio >= OCELOT_NUM_TC)
return -ERANGE;
/* There is at least one app table priority (this one), so we need to
* make sure DSCP prioritization is enabled on the port.
* Also make sure DSCP translation is disabled
* (dcbnl doesn't support it).
*/
mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_DSCP_ENA, mask,
ANA_PORT_QOS_CFG, port);
/* Trust this DSCP value and map it to the given QoS class */
val = ANA_DSCP_CFG_DSCP_TRUST_ENA | ANA_DSCP_CFG_QOS_DSCP_VAL(prio);
ocelot_write_rix(ocelot, val, ANA_DSCP_CFG, dscp);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_add_dscp_prio);
int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio)
{
int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, dscp);
int mask, i;
/* During a "dcb app replace" command, the new app table entry will be
* added first, then the old one will be deleted. But the hardware only
* supports one QoS class per DSCP value (duh), so if we blindly delete
* the app table entry for this DSCP value, we end up deleting the
* entry with the new priority. Avoid that by checking whether user
* space wants to delete the priority which is currently configured, or
* something else which is no longer current.
*/
if (ANA_DSCP_CFG_QOS_DSCP_VAL_X(dscp_cfg) != prio)
return 0;
/* Untrust this DSCP value */
ocelot_write_rix(ocelot, 0, ANA_DSCP_CFG, dscp);
for (i = 0; i < 64; i++) {
int dscp_cfg = ocelot_read_rix(ocelot, ANA_DSCP_CFG, i);
/* There are still app table entries on the port, so we need to
* keep DSCP enabled, nothing to do.
*/
if (dscp_cfg & ANA_DSCP_CFG_DSCP_TRUST_ENA)
return 0;
}
/* Disable DSCP QoS classification if there isn't any trusted
* DSCP value left.
*/
mask = ANA_PORT_QOS_CFG_QOS_DSCP_ENA |
ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA;
ocelot_rmw_gix(ocelot, 0, mask, ANA_PORT_QOS_CFG, port);
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_del_dscp_prio);
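/* The switch has a single mirror destination (ANA_MIRRORPORTS), so the
 * mirror target is refcounted: the first user programs the egress port,
 * subsequent users must request the same port, and the last put clears it.
 */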
struct ocelot_mirror *ocelot_mirror_get(struct ocelot *ocelot, int to,
struct netlink_ext_ack *extack)
{
struct ocelot_mirror *m = ocelot->mirror;
if (m) {
if (m->to != to) {
NL_SET_ERR_MSG_MOD(extack,
"Mirroring already configured towards different egress port");
return ERR_PTR(-EBUSY);
}
refcount_inc(&m->refcount);
return m;
}
m = kzalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
m->to = to;
refcount_set(&m->refcount, 1);
ocelot->mirror = m;
/* Program the mirror port to hardware */
ocelot_write(ocelot, BIT(to), ANA_MIRRORPORTS);
return m;
}
void ocelot_mirror_put(struct ocelot *ocelot)
{
struct ocelot_mirror *m = ocelot->mirror;
if (!refcount_dec_and_test(&m->refcount))
return;
ocelot_write(ocelot, 0, ANA_MIRRORPORTS);
ocelot->mirror = NULL;
kfree(m);
}
int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to,
bool ingress, struct netlink_ext_ack *extack)
{
struct ocelot_mirror *m = ocelot_mirror_get(ocelot, to, extack);
if (IS_ERR(m))
return PTR_ERR(m);
if (ingress) {
ocelot_rmw_gix(ocelot, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
ANA_PORT_PORT_CFG, from);
} else {
ocelot_rmw(ocelot, BIT(from), BIT(from),
ANA_EMIRRORPORTS);
}
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_port_mirror_add);
void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress)
{
if (ingress) {
ocelot_rmw_gix(ocelot, 0, ANA_PORT_PORT_CFG_SRC_MIRROR_ENA,
ANA_PORT_PORT_CFG, from);
} else {
ocelot_rmw(ocelot, 0, BIT(from), ANA_EMIRRORPORTS);
}
ocelot_mirror_put(ocelot);
}
EXPORT_SYMBOL_GPL(ocelot_port_mirror_del);
static void ocelot_port_reset_mqprio(struct ocelot *ocelot, int port)
{
struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port);
netdev_reset_tc(dev);
ocelot_port_change_fp(ocelot, port, 0);
}
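/* Offload an mqprio qdisc. The hardware supports only a 1:1 mapping between
 * traffic classes and TX queues, and the set of preemptible TCs is passed on
 * via ocelot_port_change_fp().
 */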
int ocelot_port_mqprio(struct ocelot *ocelot, int port,
struct tc_mqprio_qopt_offload *mqprio)
{
struct net_device *dev = ocelot->ops->port_to_netdev(ocelot, port);
struct netlink_ext_ack *extack = mqprio->extack;
struct tc_mqprio_qopt *qopt = &mqprio->qopt;
int num_tc = qopt->num_tc;
int tc, err;
if (!num_tc) {
ocelot_port_reset_mqprio(ocelot, port);
return 0;
}
err = netdev_set_num_tc(dev, num_tc);
if (err)
return err;
for (tc = 0; tc < num_tc; tc++) {
if (qopt->count[tc] != 1) {
NL_SET_ERR_MSG_MOD(extack,
"Only one TXQ per TC supported");
return -EINVAL;
}
err = netdev_set_tc_queue(dev, tc, 1, qopt->offset[tc]);
if (err)
goto err_reset_tc;
}
err = netif_set_real_num_tx_queues(dev, num_tc);
if (err)
goto err_reset_tc;
ocelot_port_change_fp(ocelot, port, mqprio->preemptible_tcs);
return 0;
err_reset_tc:
ocelot_port_reset_mqprio(ocelot, port);
return err;
}
EXPORT_SYMBOL_GPL(ocelot_port_mqprio);
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
/* Basic L2 initialization */
/* Set MAC IFG Gaps
* FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0
* !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5
*/
ocelot_port_writel(ocelot_port, DEV_MAC_IFG_CFG_TX_IFG(5),
DEV_MAC_IFG_CFG);
/* Load seed (0) and set MAC HDX late collision */
ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) |
DEV_MAC_HDX_CFG_SEED_LOAD,
DEV_MAC_HDX_CFG);
mdelay(1);
ocelot_port_writel(ocelot_port, DEV_MAC_HDX_CFG_LATE_COL_POS(67),
DEV_MAC_HDX_CFG);
/* Set Max Length and maximum tags allowed */
ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
DEV_MAC_TAGS_CFG);
/* Set SMAC of Pause frame (00:00:00:00:00:00) */
ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
/* Enable transmission of pause frames */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
/* Drop frames with multicast source address */
ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
ANA_PORT_DROP_CFG, port);
/* Set default VLAN and tag type to 8021Q. */
ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_TPID(ETH_P_8021Q),
REW_PORT_VLAN_CFG_PORT_TPID_M,
REW_PORT_VLAN_CFG, port);
/* Disable source address learning for standalone mode */
ocelot_port_set_learning(ocelot, port, false);
/* Set the port's initial logical port ID value, enable receiving
* frames on it, and configure the MAC address learning type to
* automatic.
*/
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
ANA_PORT_PORT_CFG_RECV_ENA |
ANA_PORT_PORT_CFG_PORTID_VAL(port),
ANA_PORT_PORT_CFG, port);
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
EXPORT_SYMBOL(ocelot_init_port);
/* Configure and enable the CPU port module, which is a set of queues
* accessible through register MMIO, frame DMA or Ethernet (in case
* NPI mode is used).
*/
static void ocelot_cpu_port_init(struct ocelot *ocelot)
{
int cpu = ocelot->num_phys_ports;
/* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
/* Instead set up a multicast destination PGID for traffic copied to
* the CPU. Whitelisted MAC addresses like the port netdevice MAC
* addresses will be copied to the CPU via this PGID.
*/
ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
/* Enable CPU port module */
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
/* CPU port Injection/Extraction configuration */
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
OCELOT_TAG_PREFIX_NONE);
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot,
ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_STANDALONE_PVID) |
ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
static void ocelot_detect_features(struct ocelot *ocelot)
{
int mmgt, eq_ctrl;
/* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
* the number of 240-byte free memory words (aka 4-cell chunks) and not
* 192 bytes as the documentation incorrectly says.
*/
mmgt = ocelot_read(ocelot, SYS_MMGT);
ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}
static int ocelot_mem_init_status(struct ocelot *ocelot)
{
unsigned int val;
int err;
err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
&val);
return err ?: val;
}
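/* Trigger switch core memory initialization (MEM_INIT is self-clearing),
 * wait for it to complete, then enable the memories and finally the switch
 * core.
 */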
int ocelot_reset(struct ocelot *ocelot)
{
int err;
u32 val;
err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
if (err)
return err;
err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
if (err)
return err;
/* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
* 100us) before enabling the switch core.
*/
err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
if (err)
return err;
err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
if (err)
return err;
return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
}
EXPORT_SYMBOL(ocelot_reset);
int ocelot_init(struct ocelot *ocelot)
{
int i, ret;
u32 port;
if (ocelot->ops->reset) {
ret = ocelot->ops->reset(ocelot);
if (ret) {
dev_err(ocelot->dev, "Switch reset failed\n");
return ret;
}
}
mutex_init(&ocelot->mact_lock);
mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
if (!ocelot->owq)
return -ENOMEM;
ret = ocelot_stats_init(ocelot);
if (ret)
goto err_stats_init;
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
INIT_LIST_HEAD(&ocelot->vlans);
INIT_LIST_HEAD(&ocelot->lag_fdbs);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
ocelot_cpu_port_init(ocelot);
if (ocelot->ops->psfp_init)
ocelot->ops->psfp_init(ocelot);
if (ocelot->mm_supported) {
ret = ocelot_mm_init(ocelot);
if (ret)
goto err_mm_init;
}
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
SYS_STAT_CFG);
}
/* Only use S-Tag */
ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
/* Aggregation mode */
ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
ANA_AGGR_CFG_AC_DMAC_ENA |
ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
ANA_AGGR_CFG);
/* Set MAC age time to default value. The entry is aged after
* 2*AGE_PERIOD
*/
ocelot_write(ocelot,
ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
ANA_AUTOAGE);
/* Disable learning for frames discarded by VLAN ingress filtering */
regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
/* Setup frame ageing - fixed value "2 sec" - in 6.5 us units */
ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
/* Setup flooding PGIDs */
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
ANA_FLOODING_IPMC);
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Transmit the frame to the local port. */
ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
/* Do not forward BPDU frames to the front ports. */
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
port);
/* Ensure bridging is disabled */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_BLACKHOLE);
/* Allow broadcast and unknown L2 multicast to the CPU. */
ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID, PGID_MC);
ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
ANA_PGID_PGID, PGID_BC);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
	/* Allow manual injection via DEVCPU_QS registers, and byte-swap these
	 * registers' endianness.
*/
ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP |
QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0);
ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP |
QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0);
ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) |
ANA_CPUQ_CFG_CPUQ_LRN(2) |
ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) |
ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) |
ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) |
ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) |
ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) |
ANA_CPUQ_CFG_CPUQ_IGMP(6) |
ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG);
for (i = 0; i < 16; i++)
ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) |
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
return 0;
err_mm_init:
ocelot_stats_deinit(ocelot);
err_stats_init:
destroy_workqueue(ocelot->owq);
return ret;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
ocelot_stats_deinit(ocelot);
destroy_workqueue(ocelot->owq);
}
EXPORT_SYMBOL(ocelot_deinit);
void ocelot_deinit_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_purge(&ocelot_port->tx_skbs);
}
EXPORT_SYMBOL(ocelot_deinit_port);
MODULE_LICENSE("Dual MIT/GPL");
|
linux-master
|
drivers/net/ethernet/mscc/ocelot.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
#include <net/switchdev.h>
#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot_fdma.h"
#include "ocelot.h"
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
ocelot->map = vsc7514_regmap;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
ret = ocelot_regfields_init(ocelot, vsc7514_regfields);
if (ret)
return ret;
ocelot_pll5_init(ocelot);
eth_random_addr(ocelot->base_mac);
ocelot->base_mac[5] &= 0xf0;
return 0;
}
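/* Extraction IRQ handler: pull frames from CPU extraction group 0 via
 * register MMIO for as long as data is pending; on error, the CPU queue is
 * drained to recover.
 */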
static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
{
struct ocelot *ocelot = arg;
int grp = 0, err;
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
if (err)
goto out;
skb->dev->stats.rx_bytes += skb->len;
skb->dev->stats.rx_packets++;
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
}
out:
if (err < 0)
ocelot_drain_cpu_queue(ocelot, 0);
return IRQ_HANDLED;
}
static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
{
struct ocelot *ocelot = arg;
ocelot_get_txtstamp(ocelot);
return IRQ_HANDLED;
}
static const struct of_device_id mscc_ocelot_match[] = {
{ .compatible = "mscc,vsc7514-switch" },
{ }
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
.wm_dec = ocelot_wm_dec,
.wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
static struct ptp_clock_info ocelot_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "ocelot ptp",
.max_adj = 0x7fffffff,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = OCELOT_PTP_PINS_NUM,
.n_pins = OCELOT_PTP_PINS_NUM,
.pps = 0,
.gettime64 = ocelot_ptp_gettime64,
.settime64 = ocelot_ptp_settime64,
.adjtime = ocelot_ptp_adjtime,
.adjfine = ocelot_ptp_adjfine,
.verify = ocelot_ptp_verify,
.enable = ocelot_ptp_enable,
};
static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++)
ocelot_port_devlink_teardown(ocelot, port);
}
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
ocelot_deinit_port(ocelot, port);
ocelot_release_port(ocelot_port);
}
}
static int mscc_ocelot_init_ports(struct platform_device *pdev,
struct device_node *ports)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
u32 devlink_ports_registered = 0;
struct device_node *portnp;
int port, err;
u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports,
sizeof(*ocelot->devlink_ports),
GFP_KERNEL);
if (!ocelot->devlink_ports)
return -ENOMEM;
for_each_available_child_of_node(ports, portnp) {
struct regmap *target;
struct resource *res;
char res_name[8];
		if (of_property_read_u32(portnp, "reg", &reg))
continue;
port = reg;
if (port < 0 || port >= ocelot->num_phys_ports) {
dev_err(ocelot->dev,
"invalid port number: %d >= %d\n", port,
ocelot->num_phys_ports);
continue;
}
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
res_name);
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
err = PTR_ERR(target);
of_node_put(portnp);
goto out_teardown;
}
err = ocelot_port_devlink_init(ocelot, port,
DEVLINK_PORT_FLAVOUR_PHYSICAL);
if (err) {
of_node_put(portnp);
goto out_teardown;
}
err = ocelot_probe_port(ocelot, port, target, portnp);
if (err) {
ocelot_port_devlink_teardown(ocelot, port);
continue;
}
devlink_ports_registered |= BIT(port);
}
/* Initialize unused devlink ports at the end */
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (devlink_ports_registered & BIT(port))
continue;
err = ocelot_port_devlink_init(ocelot, port,
DEVLINK_PORT_FLAVOUR_UNUSED);
if (err)
goto out_teardown;
devlink_ports_registered |= BIT(port);
}
return 0;
out_teardown:
/* Unregister the network interfaces */
mscc_ocelot_release_ports(ocelot);
/* Tear down devlink ports for the registered network interfaces */
for (port = 0; port < ocelot->num_phys_ports; port++) {
if (devlink_ports_registered & BIT(port))
ocelot_port_devlink_teardown(ocelot, port);
}
return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
struct {
enum ocelot_target id;
char *name;
u8 optional:1;
} io_target[] = {
{ SYS, "sys" },
{ REW, "rew" },
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
{ S0, "s0" },
{ S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
{ FDMA, "fdma", 1 },
};
if (!np && !pdev->dev.platform_data)
return -ENODEV;
devlink =
devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot), &pdev->dev);
if (!devlink)
return -ENOMEM;
ocelot = devlink_priv(devlink);
ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
for (i = 0; i < ARRAY_SIZE(io_target); i++) {
struct regmap *target;
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
io_target[i].name);
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
if (io_target[i].optional) {
ocelot->targets[io_target[i].id] = NULL;
continue;
}
err = PTR_ERR(target);
goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
}
if (ocelot->targets[FDMA])
ocelot_fdma_init(pdev, ocelot);
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
err = PTR_ERR(hsio);
goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
if (irq_xtr < 0) {
err = irq_xtr;
goto out_free_devlink;
}
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
ocelot_ptp_rdy_irq_handler,
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
}
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
err = -ENODEV;
goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
ocelot->vcap_pol.base = VSC7514_VCAP_POLICER_BASE;
ocelot->vcap_pol.max = VSC7514_VCAP_POLICER_MAX;
ocelot->npi = -1;
err = ocelot_init(ocelot);
if (err)
goto out_put_ports;
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
if (ocelot->fdma)
ocelot_fdma_start(ocelot);
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_ocelot_release_ports;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
dev_err(ocelot->dev,
"Timestamp initialization failed\n");
ocelot->ptp = 0;
}
}
register_netdevice_notifier(&ocelot_netdevice_nb);
register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
of_node_put(ports);
devlink_register(devlink);
dev_info(&pdev->dev, "Ocelot switch probed\n");
return 0;
out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
out_free_devlink:
devlink_free(devlink);
return err;
}
static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
if (ocelot->fdma)
ocelot_fdma_deinit(ocelot);
devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
devlink_free(ocelot->devlink);
return 0;
}
static struct platform_driver mscc_ocelot_driver = {
.probe = mscc_ocelot_probe,
.remove = mscc_ocelot_remove,
.driver = {
.name = "ocelot-switch",
.of_match_table = mscc_ocelot_match,
},
};
module_platform_driver(mscc_ocelot_driver);
MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
MODULE_AUTHOR("Alexandre Belloni <[email protected]>");
MODULE_LICENSE("Dual MIT/GPL");
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_vsc7514.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "ocelot.h"
int __ocelot_bulk_read_ix(struct ocelot *ocelot, enum ocelot_reg reg,
u32 offset, void *buf, int count)
{
enum ocelot_target target;
u32 addr;
ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
return regmap_bulk_read(ocelot->targets[target], addr + offset,
buf, count);
}
EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
u32 __ocelot_read_ix(struct ocelot *ocelot, enum ocelot_reg reg, u32 offset)
{
enum ocelot_target target;
u32 addr, val;
ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
regmap_read(ocelot->targets[target], addr + offset, &val);
return val;
}
EXPORT_SYMBOL_GPL(__ocelot_read_ix);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, enum ocelot_reg reg,
u32 offset)
{
enum ocelot_target target;
u32 addr;
ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
regmap_write(ocelot->targets[target], addr + offset, val);
}
EXPORT_SYMBOL_GPL(__ocelot_write_ix);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask,
enum ocelot_reg reg, u32 offset)
{
enum ocelot_target target;
u32 addr;
ocelot_reg_to_target_addr(ocelot, reg, &target, &addr);
WARN_ON(!target);
regmap_update_bits(ocelot->targets[target], addr + offset, mask, val);
}
EXPORT_SYMBOL_GPL(__ocelot_rmw_ix);
u32 ocelot_port_readl(struct ocelot_port *port, enum ocelot_reg reg)
{
struct ocelot *ocelot = port->ocelot;
u16 target = reg >> TARGET_OFFSET;
u32 val;
WARN_ON(!target);
regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
return val;
}
EXPORT_SYMBOL_GPL(ocelot_port_readl);
void ocelot_port_writel(struct ocelot_port *port, u32 val, enum ocelot_reg reg)
{
struct ocelot *ocelot = port->ocelot;
u16 target = reg >> TARGET_OFFSET;
WARN_ON(!target);
regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
}
EXPORT_SYMBOL_GPL(ocelot_port_writel);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask,
enum ocelot_reg reg)
{
u32 cur = ocelot_port_readl(port, reg);
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
EXPORT_SYMBOL_GPL(ocelot_port_rmwl);
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{
u32 val;
regmap_read(ocelot->targets[target],
ocelot->map[target][reg] + offset, &val);
return val;
}
void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 val, u32 reg, u32 offset)
{
regmap_write(ocelot->targets[target],
ocelot->map[target][reg] + offset, val);
}
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields)
{
unsigned int i;
u16 target;
for (i = 0; i < REGFIELD_MAX; i++) {
struct reg_field regfield = {};
u32 reg = regfields[i].reg;
if (!reg)
continue;
target = regfields[i].reg >> TARGET_OFFSET;
regfield.reg = ocelot->map[target][reg & REG_MASK];
regfield.lsb = regfields[i].lsb;
regfield.msb = regfields[i].msb;
regfield.id_size = regfields[i].id_size;
regfield.id_offset = regfields[i].id_offset;
ocelot->regfields[i] =
devm_regmap_field_alloc(ocelot->dev,
ocelot->targets[target],
regfield);
if (IS_ERR(ocelot->regfields[i]))
return PTR_ERR(ocelot->regfields[i]);
}
return 0;
}
EXPORT_SYMBOL_GPL(ocelot_regfields_init);
static struct regmap_config ocelot_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
};
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res)
{
void __iomem *regs;
regs = devm_ioremap_resource(ocelot->dev, res);
if (IS_ERR(regs))
return ERR_CAST(regs);
ocelot_regmap_config.name = res->name;
return devm_regmap_init_mmio(ocelot->dev, regs, &ocelot_regmap_config);
}
EXPORT_SYMBOL_GPL(ocelot_regmap_init);
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_io.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Statistics for Ocelot switch family
*
* Copyright (c) 2017 Microsemi Corporation
* Copyright 2022 NXP
*/
#include <linux/ethtool_netlink.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include "ocelot.h"
enum ocelot_stat {
OCELOT_STAT_RX_OCTETS,
OCELOT_STAT_RX_UNICAST,
OCELOT_STAT_RX_MULTICAST,
OCELOT_STAT_RX_BROADCAST,
OCELOT_STAT_RX_SHORTS,
OCELOT_STAT_RX_FRAGMENTS,
OCELOT_STAT_RX_JABBERS,
OCELOT_STAT_RX_CRC_ALIGN_ERRS,
OCELOT_STAT_RX_SYM_ERRS,
OCELOT_STAT_RX_64,
OCELOT_STAT_RX_65_127,
OCELOT_STAT_RX_128_255,
OCELOT_STAT_RX_256_511,
OCELOT_STAT_RX_512_1023,
OCELOT_STAT_RX_1024_1526,
OCELOT_STAT_RX_1527_MAX,
OCELOT_STAT_RX_PAUSE,
OCELOT_STAT_RX_CONTROL,
OCELOT_STAT_RX_LONGS,
OCELOT_STAT_RX_CLASSIFIED_DROPS,
OCELOT_STAT_RX_RED_PRIO_0,
OCELOT_STAT_RX_RED_PRIO_1,
OCELOT_STAT_RX_RED_PRIO_2,
OCELOT_STAT_RX_RED_PRIO_3,
OCELOT_STAT_RX_RED_PRIO_4,
OCELOT_STAT_RX_RED_PRIO_5,
OCELOT_STAT_RX_RED_PRIO_6,
OCELOT_STAT_RX_RED_PRIO_7,
OCELOT_STAT_RX_YELLOW_PRIO_0,
OCELOT_STAT_RX_YELLOW_PRIO_1,
OCELOT_STAT_RX_YELLOW_PRIO_2,
OCELOT_STAT_RX_YELLOW_PRIO_3,
OCELOT_STAT_RX_YELLOW_PRIO_4,
OCELOT_STAT_RX_YELLOW_PRIO_5,
OCELOT_STAT_RX_YELLOW_PRIO_6,
OCELOT_STAT_RX_YELLOW_PRIO_7,
OCELOT_STAT_RX_GREEN_PRIO_0,
OCELOT_STAT_RX_GREEN_PRIO_1,
OCELOT_STAT_RX_GREEN_PRIO_2,
OCELOT_STAT_RX_GREEN_PRIO_3,
OCELOT_STAT_RX_GREEN_PRIO_4,
OCELOT_STAT_RX_GREEN_PRIO_5,
OCELOT_STAT_RX_GREEN_PRIO_6,
OCELOT_STAT_RX_GREEN_PRIO_7,
OCELOT_STAT_RX_ASSEMBLY_ERRS,
OCELOT_STAT_RX_SMD_ERRS,
OCELOT_STAT_RX_ASSEMBLY_OK,
OCELOT_STAT_RX_MERGE_FRAGMENTS,
OCELOT_STAT_RX_PMAC_OCTETS,
OCELOT_STAT_RX_PMAC_UNICAST,
OCELOT_STAT_RX_PMAC_MULTICAST,
OCELOT_STAT_RX_PMAC_BROADCAST,
OCELOT_STAT_RX_PMAC_SHORTS,
OCELOT_STAT_RX_PMAC_FRAGMENTS,
OCELOT_STAT_RX_PMAC_JABBERS,
OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS,
OCELOT_STAT_RX_PMAC_SYM_ERRS,
OCELOT_STAT_RX_PMAC_64,
OCELOT_STAT_RX_PMAC_65_127,
OCELOT_STAT_RX_PMAC_128_255,
OCELOT_STAT_RX_PMAC_256_511,
OCELOT_STAT_RX_PMAC_512_1023,
OCELOT_STAT_RX_PMAC_1024_1526,
OCELOT_STAT_RX_PMAC_1527_MAX,
OCELOT_STAT_RX_PMAC_PAUSE,
OCELOT_STAT_RX_PMAC_CONTROL,
OCELOT_STAT_RX_PMAC_LONGS,
OCELOT_STAT_TX_OCTETS,
OCELOT_STAT_TX_UNICAST,
OCELOT_STAT_TX_MULTICAST,
OCELOT_STAT_TX_BROADCAST,
OCELOT_STAT_TX_COLLISION,
OCELOT_STAT_TX_DROPS,
OCELOT_STAT_TX_PAUSE,
OCELOT_STAT_TX_64,
OCELOT_STAT_TX_65_127,
OCELOT_STAT_TX_128_255,
OCELOT_STAT_TX_256_511,
OCELOT_STAT_TX_512_1023,
OCELOT_STAT_TX_1024_1526,
OCELOT_STAT_TX_1527_MAX,
OCELOT_STAT_TX_YELLOW_PRIO_0,
OCELOT_STAT_TX_YELLOW_PRIO_1,
OCELOT_STAT_TX_YELLOW_PRIO_2,
OCELOT_STAT_TX_YELLOW_PRIO_3,
OCELOT_STAT_TX_YELLOW_PRIO_4,
OCELOT_STAT_TX_YELLOW_PRIO_5,
OCELOT_STAT_TX_YELLOW_PRIO_6,
OCELOT_STAT_TX_YELLOW_PRIO_7,
OCELOT_STAT_TX_GREEN_PRIO_0,
OCELOT_STAT_TX_GREEN_PRIO_1,
OCELOT_STAT_TX_GREEN_PRIO_2,
OCELOT_STAT_TX_GREEN_PRIO_3,
OCELOT_STAT_TX_GREEN_PRIO_4,
OCELOT_STAT_TX_GREEN_PRIO_5,
OCELOT_STAT_TX_GREEN_PRIO_6,
OCELOT_STAT_TX_GREEN_PRIO_7,
OCELOT_STAT_TX_AGED,
OCELOT_STAT_TX_MM_HOLD,
OCELOT_STAT_TX_MERGE_FRAGMENTS,
OCELOT_STAT_TX_PMAC_OCTETS,
OCELOT_STAT_TX_PMAC_UNICAST,
OCELOT_STAT_TX_PMAC_MULTICAST,
OCELOT_STAT_TX_PMAC_BROADCAST,
OCELOT_STAT_TX_PMAC_PAUSE,
OCELOT_STAT_TX_PMAC_64,
OCELOT_STAT_TX_PMAC_65_127,
OCELOT_STAT_TX_PMAC_128_255,
OCELOT_STAT_TX_PMAC_256_511,
OCELOT_STAT_TX_PMAC_512_1023,
OCELOT_STAT_TX_PMAC_1024_1526,
OCELOT_STAT_TX_PMAC_1527_MAX,
OCELOT_STAT_DROP_LOCAL,
OCELOT_STAT_DROP_TAIL,
OCELOT_STAT_DROP_YELLOW_PRIO_0,
OCELOT_STAT_DROP_YELLOW_PRIO_1,
OCELOT_STAT_DROP_YELLOW_PRIO_2,
OCELOT_STAT_DROP_YELLOW_PRIO_3,
OCELOT_STAT_DROP_YELLOW_PRIO_4,
OCELOT_STAT_DROP_YELLOW_PRIO_5,
OCELOT_STAT_DROP_YELLOW_PRIO_6,
OCELOT_STAT_DROP_YELLOW_PRIO_7,
OCELOT_STAT_DROP_GREEN_PRIO_0,
OCELOT_STAT_DROP_GREEN_PRIO_1,
OCELOT_STAT_DROP_GREEN_PRIO_2,
OCELOT_STAT_DROP_GREEN_PRIO_3,
OCELOT_STAT_DROP_GREEN_PRIO_4,
OCELOT_STAT_DROP_GREEN_PRIO_5,
OCELOT_STAT_DROP_GREEN_PRIO_6,
OCELOT_STAT_DROP_GREEN_PRIO_7,
OCELOT_NUM_STATS,
};
struct ocelot_stat_layout {
enum ocelot_reg reg;
char name[ETH_GSTRING_LEN];
};
/* 32-bit counter checked for wraparound by ocelot_port_update_stats()
* and copied to ocelot->stats.
*/
#define OCELOT_STAT(kind) \
[OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind }
/* Same as above, except also exported to ethtool -S. Standard counters should
* only be exposed to more specific interfaces rather than by their string name.
*/
#define OCELOT_STAT_ETHTOOL(kind, ethtool_name) \
[OCELOT_STAT_ ## kind] = { .reg = SYS_COUNT_ ## kind, .name = ethtool_name }
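/* E.g. OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause") expands to
 * [OCELOT_STAT_RX_PAUSE] = { .reg = SYS_COUNT_RX_PAUSE, .name = "rx_pause" }.
 */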
#define OCELOT_COMMON_STATS \
OCELOT_STAT_ETHTOOL(RX_OCTETS, "rx_octets"), \
OCELOT_STAT_ETHTOOL(RX_UNICAST, "rx_unicast"), \
OCELOT_STAT_ETHTOOL(RX_MULTICAST, "rx_multicast"), \
OCELOT_STAT_ETHTOOL(RX_BROADCAST, "rx_broadcast"), \
OCELOT_STAT_ETHTOOL(RX_SHORTS, "rx_shorts"), \
OCELOT_STAT_ETHTOOL(RX_FRAGMENTS, "rx_fragments"), \
OCELOT_STAT_ETHTOOL(RX_JABBERS, "rx_jabbers"), \
OCELOT_STAT_ETHTOOL(RX_CRC_ALIGN_ERRS, "rx_crc_align_errs"), \
OCELOT_STAT_ETHTOOL(RX_SYM_ERRS, "rx_sym_errs"), \
OCELOT_STAT_ETHTOOL(RX_64, "rx_frames_below_65_octets"), \
OCELOT_STAT_ETHTOOL(RX_65_127, "rx_frames_65_to_127_octets"), \
OCELOT_STAT_ETHTOOL(RX_128_255, "rx_frames_128_to_255_octets"), \
OCELOT_STAT_ETHTOOL(RX_256_511, "rx_frames_256_to_511_octets"), \
OCELOT_STAT_ETHTOOL(RX_512_1023, "rx_frames_512_to_1023_octets"), \
OCELOT_STAT_ETHTOOL(RX_1024_1526, "rx_frames_1024_to_1526_octets"), \
OCELOT_STAT_ETHTOOL(RX_1527_MAX, "rx_frames_over_1526_octets"), \
OCELOT_STAT_ETHTOOL(RX_PAUSE, "rx_pause"), \
OCELOT_STAT_ETHTOOL(RX_CONTROL, "rx_control"), \
OCELOT_STAT_ETHTOOL(RX_LONGS, "rx_longs"), \
OCELOT_STAT_ETHTOOL(RX_CLASSIFIED_DROPS, "rx_classified_drops"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_0, "rx_red_prio_0"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_1, "rx_red_prio_1"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_2, "rx_red_prio_2"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_3, "rx_red_prio_3"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_4, "rx_red_prio_4"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_5, "rx_red_prio_5"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_6, "rx_red_prio_6"), \
OCELOT_STAT_ETHTOOL(RX_RED_PRIO_7, "rx_red_prio_7"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_0, "rx_yellow_prio_0"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_1, "rx_yellow_prio_1"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_2, "rx_yellow_prio_2"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_3, "rx_yellow_prio_3"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_4, "rx_yellow_prio_4"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_5, "rx_yellow_prio_5"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_6, "rx_yellow_prio_6"), \
OCELOT_STAT_ETHTOOL(RX_YELLOW_PRIO_7, "rx_yellow_prio_7"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_0, "rx_green_prio_0"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_1, "rx_green_prio_1"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_2, "rx_green_prio_2"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_3, "rx_green_prio_3"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_4, "rx_green_prio_4"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_5, "rx_green_prio_5"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_6, "rx_green_prio_6"), \
OCELOT_STAT_ETHTOOL(RX_GREEN_PRIO_7, "rx_green_prio_7"), \
OCELOT_STAT_ETHTOOL(TX_OCTETS, "tx_octets"), \
OCELOT_STAT_ETHTOOL(TX_UNICAST, "tx_unicast"), \
OCELOT_STAT_ETHTOOL(TX_MULTICAST, "tx_multicast"), \
OCELOT_STAT_ETHTOOL(TX_BROADCAST, "tx_broadcast"), \
OCELOT_STAT_ETHTOOL(TX_COLLISION, "tx_collision"), \
OCELOT_STAT_ETHTOOL(TX_DROPS, "tx_drops"), \
OCELOT_STAT_ETHTOOL(TX_PAUSE, "tx_pause"), \
OCELOT_STAT_ETHTOOL(TX_64, "tx_frames_below_65_octets"), \
OCELOT_STAT_ETHTOOL(TX_65_127, "tx_frames_65_to_127_octets"), \
OCELOT_STAT_ETHTOOL(TX_128_255, "tx_frames_128_255_octets"), \
OCELOT_STAT_ETHTOOL(TX_256_511, "tx_frames_256_511_octets"), \
OCELOT_STAT_ETHTOOL(TX_512_1023, "tx_frames_512_1023_octets"), \
OCELOT_STAT_ETHTOOL(TX_1024_1526, "tx_frames_1024_1526_octets"), \
OCELOT_STAT_ETHTOOL(TX_1527_MAX, "tx_frames_over_1526_octets"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_0, "tx_yellow_prio_0"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_1, "tx_yellow_prio_1"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_2, "tx_yellow_prio_2"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_3, "tx_yellow_prio_3"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_4, "tx_yellow_prio_4"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_5, "tx_yellow_prio_5"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_6, "tx_yellow_prio_6"), \
OCELOT_STAT_ETHTOOL(TX_YELLOW_PRIO_7, "tx_yellow_prio_7"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_0, "tx_green_prio_0"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_1, "tx_green_prio_1"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_2, "tx_green_prio_2"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_3, "tx_green_prio_3"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_4, "tx_green_prio_4"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_5, "tx_green_prio_5"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_6, "tx_green_prio_6"), \
OCELOT_STAT_ETHTOOL(TX_GREEN_PRIO_7, "tx_green_prio_7"), \
OCELOT_STAT_ETHTOOL(TX_AGED, "tx_aged"), \
OCELOT_STAT_ETHTOOL(DROP_LOCAL, "drop_local"), \
OCELOT_STAT_ETHTOOL(DROP_TAIL, "drop_tail"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_0, "drop_yellow_prio_0"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_1, "drop_yellow_prio_1"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_2, "drop_yellow_prio_2"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_3, "drop_yellow_prio_3"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_4, "drop_yellow_prio_4"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_5, "drop_yellow_prio_5"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_6, "drop_yellow_prio_6"), \
OCELOT_STAT_ETHTOOL(DROP_YELLOW_PRIO_7, "drop_yellow_prio_7"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_0, "drop_green_prio_0"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_1, "drop_green_prio_1"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_2, "drop_green_prio_2"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_3, "drop_green_prio_3"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_4, "drop_green_prio_4"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_5, "drop_green_prio_5"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_6, "drop_green_prio_6"), \
OCELOT_STAT_ETHTOOL(DROP_GREEN_PRIO_7, "drop_green_prio_7")
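/* A stats region describes a run of hardware counters whose register
 * addresses are contiguous, so that they can be fetched with a single bulk
 * read into @buf. @first_stat is the enum ocelot_stat index of the first
 * counter in the run.
 */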
struct ocelot_stats_region {
struct list_head node;
enum ocelot_reg base;
enum ocelot_stat first_stat;
int count;
u32 *buf;
};
static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
OCELOT_COMMON_STATS,
};
static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] = {
OCELOT_COMMON_STATS,
OCELOT_STAT(RX_ASSEMBLY_ERRS),
OCELOT_STAT(RX_SMD_ERRS),
OCELOT_STAT(RX_ASSEMBLY_OK),
OCELOT_STAT(RX_MERGE_FRAGMENTS),
OCELOT_STAT(TX_MERGE_FRAGMENTS),
OCELOT_STAT(TX_MM_HOLD),
OCELOT_STAT(RX_PMAC_OCTETS),
OCELOT_STAT(RX_PMAC_UNICAST),
OCELOT_STAT(RX_PMAC_MULTICAST),
OCELOT_STAT(RX_PMAC_BROADCAST),
OCELOT_STAT(RX_PMAC_SHORTS),
OCELOT_STAT(RX_PMAC_FRAGMENTS),
OCELOT_STAT(RX_PMAC_JABBERS),
OCELOT_STAT(RX_PMAC_CRC_ALIGN_ERRS),
OCELOT_STAT(RX_PMAC_SYM_ERRS),
OCELOT_STAT(RX_PMAC_64),
OCELOT_STAT(RX_PMAC_65_127),
OCELOT_STAT(RX_PMAC_128_255),
OCELOT_STAT(RX_PMAC_256_511),
OCELOT_STAT(RX_PMAC_512_1023),
OCELOT_STAT(RX_PMAC_1024_1526),
OCELOT_STAT(RX_PMAC_1527_MAX),
OCELOT_STAT(RX_PMAC_PAUSE),
OCELOT_STAT(RX_PMAC_CONTROL),
OCELOT_STAT(RX_PMAC_LONGS),
OCELOT_STAT(TX_PMAC_OCTETS),
OCELOT_STAT(TX_PMAC_UNICAST),
OCELOT_STAT(TX_PMAC_MULTICAST),
OCELOT_STAT(TX_PMAC_BROADCAST),
OCELOT_STAT(TX_PMAC_PAUSE),
OCELOT_STAT(TX_PMAC_64),
OCELOT_STAT(TX_PMAC_65_127),
OCELOT_STAT(TX_PMAC_128_255),
OCELOT_STAT(TX_PMAC_256_511),
OCELOT_STAT(TX_PMAC_512_1023),
OCELOT_STAT(TX_PMAC_1024_1526),
OCELOT_STAT(TX_PMAC_1527_MAX),
};
static const struct ocelot_stat_layout *
ocelot_get_stats_layout(struct ocelot *ocelot)
{
if (ocelot->mm_supported)
return ocelot_mm_stats_layout;
return ocelot_stats_layout;
}
/* Read the counters from hardware and keep them in region->buf.
* Caller must hold &ocelot->stat_view_lock.
*/
static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
struct ocelot_stats_region *region;
int err;
/* Configure the port to read the stats from */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
list_for_each_entry(region, &ocelot->stats_regions, node) {
err = ocelot_bulk_read(ocelot, region->base, region->buf,
region->count);
if (err)
return err;
}
return 0;
}
/* Transfer the counters from region->buf to ocelot->stats.
* Caller must hold &ocelot->stat_view_lock and &ocelot->stats_lock.
*/
static void ocelot_port_transfer_stats(struct ocelot *ocelot, int port)
{
struct ocelot_stats_region *region;
int j;
list_for_each_entry(region, &ocelot->stats_regions, node) {
unsigned int idx = port * OCELOT_NUM_STATS + region->first_stat;
for (j = 0; j < region->count; j++) {
u64 *stat = &ocelot->stats[idx + j];
u64 val = region->buf[j];
if (val < (*stat & U32_MAX))
*stat += (u64)1 << 32;
*stat = (*stat & ~(u64)U32_MAX) + val;
}
}
}
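/* Periodic worker that polls the hardware counters of every port and folds
 * them into the 64-bit software counters. Presumably it must run well within
 * one 32-bit counter wraparound period for the wraparound detection in
 * ocelot_port_transfer_stats() to stay reliable.
 */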
static void ocelot_check_stats_work(struct work_struct *work)
{
struct delayed_work *del_work = to_delayed_work(work);
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
int port, err;
mutex_lock(&ocelot->stat_view_lock);
for (port = 0; port < ocelot->num_phys_ports; port++) {
err = ocelot_port_update_stats(ocelot, port);
if (err)
break;
spin_lock(&ocelot->stats_lock);
ocelot_port_transfer_stats(ocelot, port);
spin_unlock(&ocelot->stats_lock);
}
if (!err && ocelot->ops->update_stats)
ocelot->ops->update_stats(ocelot);
mutex_unlock(&ocelot->stat_view_lock);
if (err)
dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
}
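/* Report the ethtool -S string for every counter that has one; layout entries
 * with an empty name are not exported. Example (hypothetical interface name):
 *   ethtool -S swp0
 * prints one line per named counter, e.g. "rx_octets" or "tx_green_prio_3".
 */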
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
const struct ocelot_stat_layout *layout;
enum ocelot_stat i;
if (sset != ETH_SS_STATS)
return;
layout = ocelot_get_stats_layout(ocelot);
for (i = 0; i < OCELOT_NUM_STATS; i++) {
if (layout[i].name[0] == '\0')
continue;
memcpy(data, layout[i].name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
}
EXPORT_SYMBOL(ocelot_get_strings);
/* Update ocelot->stats for the given port and run the given callback */
static void ocelot_port_stats_run(struct ocelot *ocelot, int port, void *priv,
void (*cb)(struct ocelot *ocelot, int port,
void *priv))
{
int err;
mutex_lock(&ocelot->stat_view_lock);
err = ocelot_port_update_stats(ocelot, port);
if (err) {
dev_err(ocelot->dev, "Failed to update port %d stats: %pe\n",
port, ERR_PTR(err));
goto out_unlock;
}
spin_lock(&ocelot->stats_lock);
ocelot_port_transfer_stats(ocelot, port);
cb(ocelot, port, priv);
spin_unlock(&ocelot->stats_lock);
out_unlock:
mutex_unlock(&ocelot->stat_view_lock);
}
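/* Number of counters exported to ethtool -S, i.e. the number of layout
 * entries that carry a string name. Must match what ocelot_get_strings() and
 * ocelot_get_ethtool_stats() report.
 */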
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
const struct ocelot_stat_layout *layout;
enum ocelot_stat i;
int num_stats = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
layout = ocelot_get_stats_layout(ocelot);
for (i = 0; i < OCELOT_NUM_STATS; i++)
if (layout[i].name[0] != '\0')
num_stats++;
return num_stats;
}
EXPORT_SYMBOL(ocelot_get_sset_count);
static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
const struct ocelot_stat_layout *layout;
enum ocelot_stat i;
u64 *data = priv;
layout = ocelot_get_stats_layout(ocelot);
/* Copy all supported counters */
for (i = 0; i < OCELOT_NUM_STATS; i++) {
int index = port * OCELOT_NUM_STATS + i;
if (layout[i].name[0] == '\0')
continue;
*data++ = ocelot->stats[index];
}
}
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
ocelot_port_stats_run(ocelot, port, data, ocelot_port_ethtool_stats_cb);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_pause_stats *pause_stats = priv;
pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PAUSE];
pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
}
static void ocelot_port_pmac_pause_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_pause_stats *pause_stats = priv;
pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PMAC_PAUSE];
pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PMAC_PAUSE];
}
static void ocelot_port_mm_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_mm_stats *stats = priv;
stats->MACMergeFrameAssErrorCount = s[OCELOT_STAT_RX_ASSEMBLY_ERRS];
stats->MACMergeFrameSmdErrorCount = s[OCELOT_STAT_RX_SMD_ERRS];
stats->MACMergeFrameAssOkCount = s[OCELOT_STAT_RX_ASSEMBLY_OK];
stats->MACMergeFragCountRx = s[OCELOT_STAT_RX_MERGE_FRAGMENTS];
stats->MACMergeFragCountTx = s[OCELOT_STAT_TX_MERGE_FRAGMENTS];
stats->MACMergeHoldCount = s[OCELOT_STAT_TX_MM_HOLD];
}
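/* The pause/RMON/ctrl/MAC/PHY stats getters below select between the eMAC
 * counters, the pMAC counters (only when the MAC Merge layer is supported,
 * i.e. ocelot->mm_supported) and the aggregate of the two, based on the
 * stats->src selector coming from ethtool.
 */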
void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
struct ethtool_pause_stats *pause_stats)
{
struct net_device *dev;
switch (pause_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
ocelot_port_stats_run(ocelot, port, pause_stats,
ocelot_port_pause_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (ocelot->mm_supported)
ocelot_port_stats_run(ocelot, port, pause_stats,
ocelot_port_pmac_pause_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
dev = ocelot->ops->port_to_netdev(ocelot, port);
ethtool_aggregate_pause_stats(dev, pause_stats);
break;
}
}
EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
struct ethtool_mm_stats *stats)
{
if (!ocelot->mm_supported)
return;
ocelot_port_stats_run(ocelot, port, stats, ocelot_port_mm_stats_cb);
}
EXPORT_SYMBOL_GPL(ocelot_port_get_mm_stats);
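/* Frame length buckets of the RMON histogram, matching the boundaries of the
 * hardware RX_64..RX_1527_MAX and TX_64..TX_1527_MAX counters.
 */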
static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
{ 64, 64 },
{ 65, 127 },
{ 128, 255 },
{ 256, 511 },
{ 512, 1023 },
{ 1024, 1526 },
{ 1527, 65535 },
{},
};
static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_rmon_stats *rmon_stats = priv;
rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_SHORTS];
rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_LONGS];
rmon_stats->fragments = s[OCELOT_STAT_RX_FRAGMENTS];
rmon_stats->jabbers = s[OCELOT_STAT_RX_JABBERS];
rmon_stats->hist[0] = s[OCELOT_STAT_RX_64];
rmon_stats->hist[1] = s[OCELOT_STAT_RX_65_127];
rmon_stats->hist[2] = s[OCELOT_STAT_RX_128_255];
rmon_stats->hist[3] = s[OCELOT_STAT_RX_256_511];
rmon_stats->hist[4] = s[OCELOT_STAT_RX_512_1023];
rmon_stats->hist[5] = s[OCELOT_STAT_RX_1024_1526];
rmon_stats->hist[6] = s[OCELOT_STAT_RX_1527_MAX];
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX];
}
static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_rmon_stats *rmon_stats = priv;
rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_PMAC_SHORTS];
rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_PMAC_LONGS];
rmon_stats->fragments = s[OCELOT_STAT_RX_PMAC_FRAGMENTS];
rmon_stats->jabbers = s[OCELOT_STAT_RX_PMAC_JABBERS];
rmon_stats->hist[0] = s[OCELOT_STAT_RX_PMAC_64];
rmon_stats->hist[1] = s[OCELOT_STAT_RX_PMAC_65_127];
rmon_stats->hist[2] = s[OCELOT_STAT_RX_PMAC_128_255];
rmon_stats->hist[3] = s[OCELOT_STAT_RX_PMAC_256_511];
rmon_stats->hist[4] = s[OCELOT_STAT_RX_PMAC_512_1023];
rmon_stats->hist[5] = s[OCELOT_STAT_RX_PMAC_1024_1526];
rmon_stats->hist[6] = s[OCELOT_STAT_RX_PMAC_1527_MAX];
rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_256_511];
rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_512_1023];
rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_1024_1526];
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1527_MAX];
}
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct net_device *dev;
*ranges = ocelot_rmon_ranges;
switch (rmon_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
ocelot_port_stats_run(ocelot, port, rmon_stats,
ocelot_port_rmon_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (ocelot->mm_supported)
ocelot_port_stats_run(ocelot, port, rmon_stats,
ocelot_port_pmac_rmon_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
dev = ocelot->ops->port_to_netdev(ocelot, port);
ethtool_aggregate_rmon_stats(dev, rmon_stats);
break;
}
}
EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
}
static void ocelot_port_pmac_ctrl_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_PMAC_CONTROL];
}
void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct net_device *dev;
switch (ctrl_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
ocelot_port_stats_run(ocelot, port, ctrl_stats,
ocelot_port_ctrl_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (ocelot->mm_supported)
ocelot_port_stats_run(ocelot, port, ctrl_stats,
ocelot_port_pmac_ctrl_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
dev = ocelot->ops->port_to_netdev(ocelot, port);
ethtool_aggregate_ctrl_stats(dev, ctrl_stats);
break;
}
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_mac_stats *mac_stats = priv;
mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_OCTETS];
mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_64] +
s[OCELOT_STAT_TX_65_127] +
s[OCELOT_STAT_TX_128_255] +
s[OCELOT_STAT_TX_256_511] +
s[OCELOT_STAT_TX_512_1023] +
s[OCELOT_STAT_TX_1024_1526] +
s[OCELOT_STAT_TX_1527_MAX];
mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_OCTETS];
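/* There is no single "frames received OK" counter in this layout; the sum of
 * the per-priority green and yellow ingress counters is used instead, leaving
 * out frames classified red.
 */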
mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_GREEN_PRIO_0] +
s[OCELOT_STAT_RX_GREEN_PRIO_1] +
s[OCELOT_STAT_RX_GREEN_PRIO_2] +
s[OCELOT_STAT_RX_GREEN_PRIO_3] +
s[OCELOT_STAT_RX_GREEN_PRIO_4] +
s[OCELOT_STAT_RX_GREEN_PRIO_5] +
s[OCELOT_STAT_RX_GREEN_PRIO_6] +
s[OCELOT_STAT_RX_GREEN_PRIO_7] +
s[OCELOT_STAT_RX_YELLOW_PRIO_0] +
s[OCELOT_STAT_RX_YELLOW_PRIO_1] +
s[OCELOT_STAT_RX_YELLOW_PRIO_2] +
s[OCELOT_STAT_RX_YELLOW_PRIO_3] +
s[OCELOT_STAT_RX_YELLOW_PRIO_4] +
s[OCELOT_STAT_RX_YELLOW_PRIO_5] +
s[OCELOT_STAT_RX_YELLOW_PRIO_6] +
s[OCELOT_STAT_RX_YELLOW_PRIO_7];
mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_MULTICAST];
mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_BROADCAST];
mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_MULTICAST];
mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_BROADCAST];
mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_LONGS];
/* Sadly, C_RX_CRC is the sum of FCS and alignment errors; they are not
 * counted individually.
 */
mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
}
static void ocelot_port_pmac_mac_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_mac_stats *mac_stats = priv;
mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_PMAC_OCTETS];
mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_PMAC_64] +
s[OCELOT_STAT_TX_PMAC_65_127] +
s[OCELOT_STAT_TX_PMAC_128_255] +
s[OCELOT_STAT_TX_PMAC_256_511] +
s[OCELOT_STAT_TX_PMAC_512_1023] +
s[OCELOT_STAT_TX_PMAC_1024_1526] +
s[OCELOT_STAT_TX_PMAC_1527_MAX];
mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_PMAC_OCTETS];
mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_PMAC_64] +
s[OCELOT_STAT_RX_PMAC_65_127] +
s[OCELOT_STAT_RX_PMAC_128_255] +
s[OCELOT_STAT_RX_PMAC_256_511] +
s[OCELOT_STAT_RX_PMAC_512_1023] +
s[OCELOT_STAT_RX_PMAC_1024_1526] +
s[OCELOT_STAT_RX_PMAC_1527_MAX];
mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_MULTICAST];
mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_BROADCAST];
mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_MULTICAST];
mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_BROADCAST];
mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_PMAC_LONGS];
/* Sadly, C_RX_CRC is the sum of FCS and alignment errors; they are not
 * counted individually.
 */
mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
}
void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
struct net_device *dev;
switch (mac_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
ocelot_port_stats_run(ocelot, port, mac_stats,
ocelot_port_mac_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (ocelot->mm_supported)
ocelot_port_stats_run(ocelot, port, mac_stats,
ocelot_port_pmac_mac_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
dev = ocelot->ops->port_to_netdev(ocelot, port);
ethtool_aggregate_mac_stats(dev, mac_stats);
break;
}
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_phy_stats *phy_stats = priv;
phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
}
static void ocelot_port_pmac_phy_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
struct ethtool_eth_phy_stats *phy_stats = priv;
phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_PMAC_SYM_ERRS];
}
void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
struct net_device *dev;
switch (phy_stats->src) {
case ETHTOOL_MAC_STATS_SRC_EMAC:
ocelot_port_stats_run(ocelot, port, phy_stats,
ocelot_port_phy_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_PMAC:
if (ocelot->mm_supported)
ocelot_port_stats_run(ocelot, port, phy_stats,
ocelot_port_pmac_phy_stats_cb);
break;
case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
dev = ocelot->ops->port_to_netdev(ocelot, port);
ethtool_aggregate_phy_stats(dev, phy_stats);
break;
}
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
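/* Fill the standard netdev counters from the software-maintained stats only,
 * without triggering a new hardware read; the values may therefore lag the
 * hardware by up to one OCELOT_STATS_CHECK_DELAY polling interval. Only the
 * spinlock is taken, presumably so that this stays usable from the atomic
 * contexts in which ndo_get_stats64 may run.
 */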
void ocelot_port_get_stats64(struct ocelot *ocelot, int port,
struct rtnl_link_stats64 *stats)
{
u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
spin_lock(&ocelot->stats_lock);
/* Get Rx stats */
stats->rx_bytes = s[OCELOT_STAT_RX_OCTETS];
stats->rx_packets = s[OCELOT_STAT_RX_SHORTS] +
s[OCELOT_STAT_RX_FRAGMENTS] +
s[OCELOT_STAT_RX_JABBERS] +
s[OCELOT_STAT_RX_LONGS] +
s[OCELOT_STAT_RX_64] +
s[OCELOT_STAT_RX_65_127] +
s[OCELOT_STAT_RX_128_255] +
s[OCELOT_STAT_RX_256_511] +
s[OCELOT_STAT_RX_512_1023] +
s[OCELOT_STAT_RX_1024_1526] +
s[OCELOT_STAT_RX_1527_MAX];
stats->multicast = s[OCELOT_STAT_RX_MULTICAST];
stats->rx_missed_errors = s[OCELOT_STAT_DROP_TAIL];
stats->rx_dropped = s[OCELOT_STAT_RX_RED_PRIO_0] +
s[OCELOT_STAT_RX_RED_PRIO_1] +
s[OCELOT_STAT_RX_RED_PRIO_2] +
s[OCELOT_STAT_RX_RED_PRIO_3] +
s[OCELOT_STAT_RX_RED_PRIO_4] +
s[OCELOT_STAT_RX_RED_PRIO_5] +
s[OCELOT_STAT_RX_RED_PRIO_6] +
s[OCELOT_STAT_RX_RED_PRIO_7] +
s[OCELOT_STAT_DROP_LOCAL] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_0] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_1] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_2] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_3] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_4] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_5] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_6] +
s[OCELOT_STAT_DROP_YELLOW_PRIO_7] +
s[OCELOT_STAT_DROP_GREEN_PRIO_0] +
s[OCELOT_STAT_DROP_GREEN_PRIO_1] +
s[OCELOT_STAT_DROP_GREEN_PRIO_2] +
s[OCELOT_STAT_DROP_GREEN_PRIO_3] +
s[OCELOT_STAT_DROP_GREEN_PRIO_4] +
s[OCELOT_STAT_DROP_GREEN_PRIO_5] +
s[OCELOT_STAT_DROP_GREEN_PRIO_6] +
s[OCELOT_STAT_DROP_GREEN_PRIO_7];
/* Get Tx stats */
stats->tx_bytes = s[OCELOT_STAT_TX_OCTETS];
stats->tx_packets = s[OCELOT_STAT_TX_64] +
s[OCELOT_STAT_TX_65_127] +
s[OCELOT_STAT_TX_128_255] +
s[OCELOT_STAT_TX_256_511] +
s[OCELOT_STAT_TX_512_1023] +
s[OCELOT_STAT_TX_1024_1526] +
s[OCELOT_STAT_TX_1527_MAX];
stats->tx_dropped = s[OCELOT_STAT_TX_DROPS] +
s[OCELOT_STAT_TX_AGED];
stats->collisions = s[OCELOT_STAT_TX_COLLISION];
spin_unlock(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_port_get_stats64);
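/* Walk the stats layout and group counters whose register addresses are
 * exactly 4 bytes apart into bulk-readable regions, allocating a readback
 * buffer for each region.
 */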
static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
{
struct ocelot_stats_region *region = NULL;
const struct ocelot_stat_layout *layout;
enum ocelot_reg last = 0;
enum ocelot_stat i;
INIT_LIST_HEAD(&ocelot->stats_regions);
layout = ocelot_get_stats_layout(ocelot);
for (i = 0; i < OCELOT_NUM_STATS; i++) {
if (!layout[i].reg)
continue;
/* enum ocelot_stat must be kept sorted in the same order
* as the addresses behind layout[i].reg in order to have
* efficient bulking
*/
if (last) {
WARN(ocelot->map[SYS][last & REG_MASK] >= ocelot->map[SYS][layout[i].reg & REG_MASK],
"reg 0x%x had address 0x%x but reg 0x%x has address 0x%x, bulking broken!",
last, ocelot->map[SYS][last & REG_MASK],
layout[i].reg, ocelot->map[SYS][layout[i].reg & REG_MASK]);
}
if (region && ocelot->map[SYS][layout[i].reg & REG_MASK] ==
ocelot->map[SYS][last & REG_MASK] + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
region->base = layout[i].reg;
region->first_stat = i;
region->count = 1;
list_add_tail(®ion->node, &ocelot->stats_regions);
}
last = layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
enum ocelot_target target;
u32 addr;
ocelot_reg_to_target_addr(ocelot, region->base, &target,
&addr);
dev_dbg(ocelot->dev,
"region of %d contiguous counters starting with SYS:STAT:CNT[0x%03x]\n",
region->count, addr / 4);
region->buf = devm_kcalloc(ocelot->dev, region->count,
sizeof(*region->buf), GFP_KERNEL);
if (!region->buf)
return -ENOMEM;
}
return 0;
}
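/* Allocate the per-port counter array, prepare the bulk-read regions and kick
 * off the periodic stats worker on a dedicated workqueue.
 */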
int ocelot_stats_init(struct ocelot *ocelot)
{
char queue_name[32];
int ret;
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * OCELOT_NUM_STATS,
sizeof(u64), GFP_KERNEL);
if (!ocelot->stats)
return -ENOMEM;
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
if (!ocelot->stats_queue)
return -ENOMEM;
spin_lock_init(&ocelot->stats_lock);
mutex_init(&ocelot->stat_view_lock);
ret = ocelot_prepare_stats_regions(ocelot);
if (ret) {
destroy_workqueue(ocelot->stats_queue);
return ret;
}
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
return 0;
}
void ocelot_stats_deinit(struct ocelot *ocelot)
{
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
}
|
linux-master
|
drivers/net/ethernet/mscc/ocelot_stats.c
|
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
* This contains glue logic between the switchdev driver operations and the
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
* Copyright 2020-2021 NXP
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_police.h"
#include "ocelot_vcap.h"
#include "ocelot_fdma.h"
#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP
struct ocelot_dump_ctx {
struct net_device *dev;
struct sk_buff *skb;
struct netlink_callback *cb;
int idx;
};
static bool ocelot_netdevice_dev_check(const struct net_device *dev);
static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
{
return devlink_priv(dlp->devlink);
}
static int devlink_port_to_port(struct devlink_port *dlp)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
return dlp - ocelot->devlink_ports;
}
static int ocelot_devlink_sb_pool_get(struct devlink *dl,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
}
static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
u16 pool_index, u32 size,
enum devlink_sb_threshold_type threshold_type,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
threshold_type, extack);
}
static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
p_threshold);
}
static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
unsigned int sb_index, u16 pool_index,
u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
threshold, extack);
}
static int
ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 *p_pool_index, u32 *p_threshold)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
pool_type, p_pool_index,
p_threshold);
}
static int
ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u16 pool_index, u32 threshold,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
pool_type, pool_index, threshold,
extack);
}
static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
unsigned int sb_index)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_occ_snapshot(ocelot, sb_index);
}
static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
unsigned int sb_index)
{
struct ocelot *ocelot = devlink_priv(dl);
return ocelot_sb_occ_max_clear(ocelot, sb_index);
}
static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
unsigned int sb_index,
u16 pool_index, u32 *p_cur,
u32 *p_max)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
p_cur, p_max);
}
static int
ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max)
{
struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
int port = devlink_port_to_port(dlp);
return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
tc_index, pool_type,
p_cur, p_max);
}
const struct devlink_ops ocelot_devlink_ops = {
.sb_pool_get = ocelot_devlink_sb_pool_get,
.sb_pool_set = ocelot_devlink_sb_pool_set,
.sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
.sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
.sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
.sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
.sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
.sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
};
int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
enum devlink_port_flavour flavour)
{
struct devlink_port *dlp = &ocelot->devlink_ports[port];
int id_len = sizeof(ocelot->base_mac);
struct devlink *dl = ocelot->devlink;
struct devlink_port_attrs attrs = {};
memset(dlp, 0, sizeof(*dlp));
memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
attrs.switch_id.id_len = id_len;
attrs.phys.port_number = port;
attrs.flavour = flavour;
devlink_port_attrs_set(dlp, &attrs);
return devlink_port_register(dl, dlp, port);
}
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
{
struct devlink_port *dlp = &ocelot->devlink_ports[port];
devlink_port_unregister(dlp);
}
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
{
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
if (!ingress)
return -EOPNOTSUPP;
switch (f->command) {
case FLOW_CLS_REPLACE:
return ocelot_cls_flower_replace(ocelot, port, f, ingress);
case FLOW_CLS_DESTROY:
return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
case FLOW_CLS_STATS:
return ocelot_cls_flower_stats(ocelot, port, f, ingress);
default:
return -EOPNOTSUPP;
}
}
static int ocelot_setup_tc_cls_matchall_police(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress,
struct netlink_ext_ack *extack)
{
struct flow_action_entry *action = &f->rule->action.entries[0];
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_policer pol = { 0 };
int port = priv->port.index;
int err;
if (!ingress) {
NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
return -EOPNOTSUPP;
}
if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
NL_SET_ERR_MSG_MOD(extack,
"Only one policer per port is supported");
return -EEXIST;
}
err = ocelot_policer_validate(&f->rule->action, action, extack);
if (err)
return err;
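/* rate_bytes_ps is converted to kilobits per second here; the policer
 * presumably expects its rate in kbps and its burst in bytes.
 */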
pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
pol.burst = action->police.burst;
err = ocelot_port_policer_add(ocelot, port, &pol);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
return err;
}
priv->tc.police_id = f->cookie;
priv->tc.offload_cnt++;
return 0;
}
static int ocelot_setup_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress,
struct netlink_ext_ack *extack)
{
struct flow_action *action = &f->rule->action;
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_port_private *other_priv;
const struct flow_action_entry *a;
int err;
if (f->common.protocol != htons(ETH_P_ALL))
return -EOPNOTSUPP;
if (!flow_action_basic_hw_stats_check(action, extack))
return -EOPNOTSUPP;
a = &action->entries[0];
if (!a->dev)
return -EINVAL;
if (!ocelot_netdevice_dev_check(a->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Destination not an ocelot port");
return -EOPNOTSUPP;
}
other_priv = netdev_priv(a->dev);
err = ocelot_port_mirror_add(ocelot, priv->port.index,
other_priv->port.index, ingress, extack);
if (err)
return err;
if (ingress)
priv->tc.ingress_mirred_id = f->cookie;
else
priv->tc.egress_mirred_id = f->cookie;
priv->tc.offload_cnt++;
return 0;
}
static int ocelot_del_tc_cls_matchall_police(struct ocelot_port_private *priv,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
int err;
err = ocelot_port_policer_del(ocelot, port);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Could not delete policer");
return err;
}
priv->tc.police_id = 0;
priv->tc.offload_cnt--;
return 0;
}
static int ocelot_del_tc_cls_matchall_mirred(struct ocelot_port_private *priv,
bool ingress,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_port_mirror_del(ocelot, port, ingress);
if (ingress)
priv->tc.ingress_mirred_id = 0;
else
priv->tc.egress_mirred_id = 0;
priv->tc.offload_cnt--;
return 0;
}
static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
struct tc_cls_matchall_offload *f,
bool ingress)
{
struct netlink_ext_ack *extack = f->common.extack;
struct flow_action_entry *action;
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
if (!flow_offload_has_one_action(&f->rule->action)) {
NL_SET_ERR_MSG_MOD(extack,
"Only one action is supported");
return -EOPNOTSUPP;
}
if (priv->tc.block_shared) {
NL_SET_ERR_MSG_MOD(extack,
"Matchall offloads not supported on shared blocks");
return -EOPNOTSUPP;
}
action = &f->rule->action.entries[0];
switch (action->id) {
case FLOW_ACTION_POLICE:
return ocelot_setup_tc_cls_matchall_police(priv, f,
ingress,
extack);
case FLOW_ACTION_MIRRED:
return ocelot_setup_tc_cls_matchall_mirred(priv, f,
ingress,
extack);
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
break;
case TC_CLSMATCHALL_DESTROY:
action = &f->rule->action.entries[0];
if (f->cookie == priv->tc.police_id)
return ocelot_del_tc_cls_matchall_police(priv, extack);
else if (f->cookie == priv->tc.ingress_mirred_id ||
f->cookie == priv->tc.egress_mirred_id)
return ocelot_del_tc_cls_matchall_mirred(priv, ingress,
extack);
else
return -ENOENT;
break;
case TC_CLSMATCHALL_STATS:
default:
return -EOPNOTSUPP;
}
}
static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
void *type_data,
void *cb_priv, bool ingress)
{
struct ocelot_port_private *priv = cb_priv;
if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSMATCHALL:
return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
case TC_SETUP_CLSFLOWER:
return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
default:
return -EOPNOTSUPP;
}
}
static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
void *type_data,
void *cb_priv)
{
return ocelot_setup_tc_block_cb(type, type_data,
cb_priv, true);
}
static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
void *type_data,
void *cb_priv)
{
return ocelot_setup_tc_block_cb(type, type_data,
cb_priv, false);
}
static LIST_HEAD(ocelot_block_cb_list);
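/* Bind a tc flow block to this port, using a separate callback for clsact
 * ingress and egress so that matchall/flower rules know which direction they
 * were installed on.
 */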
static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
struct flow_block_offload *f)
{
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
cb = ocelot_setup_tc_block_cb_ig;
priv->tc.block_shared = f->block_shared;
} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
cb = ocelot_setup_tc_block_cb_eg;
} else {
return -EOPNOTSUPP;
}
f->driver_block_list = &ocelot_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
return -EBUSY;
block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, f->driver_block_list);
return 0;
case FLOW_BLOCK_UNBIND:
block_cb = flow_block_cb_lookup(f->block, cb, priv);
if (!block_cb)
return -ENOENT;
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
return 0;
default:
return -EOPNOTSUPP;
}
}
static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct ocelot_port_private *priv = netdev_priv(dev);
switch (type) {
case TC_SETUP_BLOCK:
return ocelot_setup_tc_block(priv, type_data);
default:
return -EOPNOTSUPP;
}
return 0;
}
static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
bool untagged)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
int ret;
ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
if (ret)
return ret;
/* Add the port MAC address to the MAC table with the right VLAN information */
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
ENTRYTYPE_LOCKED);
return 0;
}
static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
int ret;
/* 8021q removes VID 0 on module unload for all interfaces with the VLAN
 * filtering feature. We need to keep it to receive untagged traffic.
 */
if (vid == OCELOT_STANDALONE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
if (ret)
return ret;
/* Delete the port MAC address from the MAC table with the right VLAN information */
ocelot_mact_forget(ocelot, dev->dev_addr, vid);
return 0;
}
static int ocelot_port_open(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
phylink_start(priv->phylink);
return 0;
}
static int ocelot_port_stop(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
phylink_stop(priv->phylink);
return 0;
}
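/* Inject a frame towards the switch, either through the FDMA engine when it
 * is enabled or through register-based injection otherwise. A PTP TX
 * timestamp is requested beforehand when hardware timestamping is enabled.
 */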
static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
u32 rew_op = 0;
if (!static_branch_unlikely(&ocelot_fdma_enabled) &&
!ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
/* Check if timestamping is needed */
if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
struct sk_buff *clone = NULL;
if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
if (clone)
OCELOT_SKB_CB(skb)->clone = clone;
rew_op = ocelot_ptp_rew_op(skb);
}
if (static_branch_unlikely(&ocelot_fdma_enabled)) {
ocelot_fdma_inject_frame(ocelot, port, rew_op, skb, dev);
} else {
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
consume_skb(skb);
}
return NETDEV_TX_OK;
}
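/* MAC table learn/forget operations triggered from the address list sync
 * callbacks are deferred to the ocelot->owq workqueue, presumably because
 * __dev_mc_sync() runs with the netdev address lock held (atomic context)
 * while the MAC table accesses may need to sleep.
 */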
enum ocelot_action_type {
OCELOT_MACT_LEARN,
OCELOT_MACT_FORGET,
};
struct ocelot_mact_work_ctx {
struct work_struct work;
struct ocelot *ocelot;
enum ocelot_action_type type;
union {
/* OCELOT_MACT_LEARN */
struct {
unsigned char addr[ETH_ALEN];
u16 vid;
enum macaccess_entry_type entry_type;
int pgid;
} learn;
/* OCELOT_MACT_FORGET */
struct {
unsigned char addr[ETH_ALEN];
u16 vid;
} forget;
};
};
#define ocelot_work_to_ctx(x) \
container_of((x), struct ocelot_mact_work_ctx, work)
static void ocelot_mact_work(struct work_struct *work)
{
struct ocelot_mact_work_ctx *w = ocelot_work_to_ctx(work);
struct ocelot *ocelot = w->ocelot;
switch (w->type) {
case OCELOT_MACT_LEARN:
ocelot_mact_learn(ocelot, w->learn.pgid, w->learn.addr,
w->learn.vid, w->learn.entry_type);
break;
case OCELOT_MACT_FORGET:
ocelot_mact_forget(ocelot, w->forget.addr, w->forget.vid);
break;
default:
break;
}
kfree(w);
}
static int ocelot_enqueue_mact_action(struct ocelot *ocelot,
const struct ocelot_mact_work_ctx *ctx)
{
struct ocelot_mact_work_ctx *w = kmemdup(ctx, sizeof(*w), GFP_ATOMIC);
if (!w)
return -ENOMEM;
w->ocelot = ocelot;
INIT_WORK(&w->work, ocelot_mact_work);
queue_work(ocelot->owq, &w->work);
return 0;
}
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
w.forget.vid = OCELOT_STANDALONE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
w.learn.vid = OCELOT_STANDALONE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
return ocelot_enqueue_mact_action(ocelot, &w);
}
static void ocelot_set_rx_mode(struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
u32 val;
int i;
/* This doesn't handle promiscuous mode because the bridge core is
* setting IFF_PROMISC on all slave interfaces and all frames would be
* forwarded to the CPU port.
*/
val = GENMASK(ocelot->num_phys_ports - 1, 0);
for_each_nonreserved_multicast_dest_pgid(ocelot, i)
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_STANDALONE_PVID);
eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
static void ocelot_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
return ocelot_port_get_stats64(ocelot, port, stats);
}
static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
u16 vid, u16 flags,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_fdb_add(ocelot, port, addr, vid, ocelot_port->bridge);
}
static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr, u16 vid,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_fdb_del(ocelot, port, addr, vid, ocelot_port->bridge);
}
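/* Format one FDB entry as an RTM_NEWNEIGH netlink message for the
 * ndo_fdb_dump callback, skipping entries already delivered in a previous
 * dump pass.
 */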
static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data)
{
struct ocelot_dump_ctx *dump = data;
u32 portid = NETLINK_CB(dump->cb->skb).portid;
u32 seq = dump->cb->nlh->nlmsg_seq;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
if (dump->idx < dump->cb->args[2])
goto skip;
nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
sizeof(*ndm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = NTF_SELF;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dump->dev->ifindex;
ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid))
goto nla_put_failure;
nlmsg_end(dump->skb, nlh);
skip:
dump->idx++;
return 0;
nla_put_failure:
nlmsg_cancel(dump->skb, nlh);
return -EMSGSIZE;
}
static int ocelot_port_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
struct net_device *filter_dev, int *idx)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
struct ocelot_dump_ctx dump = {
.dev = dev,
.skb = skb,
.cb = cb,
.idx = *idx,
};
int port = priv->port.index;
int ret;
ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
*idx = dump.idx;
return ret;
}
static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
return ocelot_vlan_vid_add(dev, vid, false, false);
}
static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
return ocelot_vlan_vid_del(dev, vid);
}
static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
netdev_features_t features)
{
u32 val;
/* Filtering */
val = ocelot_read(ocelot, ANA_VLANMASK);
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
val |= BIT(port);
else
val &= ~BIT(port);
ocelot_write(ocelot, val, ANA_VLANMASK);
}
static int ocelot_set_features(struct net_device *dev,
netdev_features_t features)
{
netdev_features_t changed = dev->features ^ features;
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
priv->tc.offload_cnt) {
netdev_err(dev,
"Cannot disable HW TC offload while offloads active\n");
return -EBUSY;
}
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
ocelot_vlan_mode(ocelot, port, features);
return 0;
}
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
/* If the attached PHY device isn't capable of timestamping operations,
* use our own (when possible).
*/
if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
switch (cmd) {
case SIOCSHWTSTAMP:
return ocelot_hwstamp_set(ocelot, port, ifr);
case SIOCGHWTSTAMP:
return ocelot_hwstamp_get(ocelot, port, ifr);
}
}
return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static int ocelot_change_mtu(struct net_device *dev, int new_mtu)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
ocelot_port_set_maxlen(ocelot, priv->port.index, new_mtu);
WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_change_mtu = ocelot_change_mtu,
.ndo_set_rx_mode = ocelot_set_rx_mode,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
.ndo_fdb_del = ocelot_port_fdb_del,
.ndo_fdb_dump = ocelot_port_fdb_dump,
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_eth_ioctl = ocelot_ioctl,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_port_private *priv;
if (!ocelot_port)
return NULL;
priv = container_of(ocelot_port, struct ocelot_port_private, port);
return priv->dev;
}
/* Checks if the net_device instance given to us originates from our driver */
static bool ocelot_netdevice_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &ocelot_port_netdev_ops;
}
int ocelot_netdev_to_port(struct net_device *dev)
{
struct ocelot_port_private *priv;
if (!dev || !ocelot_netdevice_dev_check(dev))
return -EINVAL;
priv = netdev_priv(dev);
return priv->port.index;
}
static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
struct ocelot_port_private *priv = netdev_priv(netdev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_get_strings(ocelot, port, sset, data);
}
static void ocelot_port_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
u64 *data)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_get_ethtool_stats(ocelot, port, data);
}
static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
return ocelot_get_sset_count(ocelot, port, sset);
}
static int ocelot_port_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
if (!ocelot->ptp)
return ethtool_op_get_ts_info(dev, info);
return ocelot_get_ts_info(ocelot, port, info);
}
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_port_get_strings,
.get_ethtool_stats = ocelot_port_get_ethtool_stats,
.get_sset_count = ocelot_port_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ocelot_port_get_ts_info,
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
u8 state)
{
ocelot_bridge_stp_state_set(ocelot, port, state);
}
static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
ocelot_set_ageing_time(ocelot, ageing_time);
}
static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
{
u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
u32 val = 0;
if (mc)
val = cpu_fwd_mcast;
ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
ANA_PORT_CPU_FWD_CFG, port);
}
static int ocelot_port_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
int err = 0;
if (ctx && ctx != priv)
return 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering,
extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = ocelot_port_pre_bridge_flags(ocelot, port,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack);
}
static int ocelot_port_obj_add_vlan(struct net_device *dev,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int ret;
ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged, extack);
if (ret)
return ret;
return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged);
}
static int ocelot_port_obj_add_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_port_mdb_add(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_del_mdb(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_port_mdb_del(ocelot, port, mdb, ocelot_port->bridge);
}
static int ocelot_port_obj_mrp_add(struct net_device *dev,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_mrp_add(ocelot, port, mrp);
}
static int ocelot_port_obj_mrp_del(struct net_device *dev,
const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_mrp_del(ocelot, port, mrp);
}
static int
ocelot_port_obj_mrp_add_ring_role(struct net_device *dev,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_mrp_add_ring_role(ocelot, port, mrp);
}
static int
ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
static int ocelot_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
int ret = 0;
if (ctx && ctx != priv)
return 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
extack);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_MRP:
ret = ocelot_port_obj_mrp_add(dev, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
ret = ocelot_port_obj_mrp_add_ring_role(dev,
SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
static int ocelot_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct ocelot_port_private *priv = netdev_priv(dev);
int ret = 0;
if (ctx && ctx != priv)
return 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_vlan_vid_del(dev,
SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_MRP:
ret = ocelot_port_obj_mrp_del(dev, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
ret = ocelot_port_obj_mrp_del_ring_role(dev,
SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
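/* Copy the current bridge port flags (learning, flooding) from the bridge
 * layer into hardware when the port starts being offloaded.
 */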
static void ocelot_inherit_brport_flags(struct ocelot *ocelot, int port,
struct net_device *brport_dev)
{
struct switchdev_brport_flags flags = {0};
int flag;
flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
for_each_set_bit(flag, &flags.mask, 32)
if (br_port_flag_is_set(brport_dev, BIT(flag)))
flags.val |= BIT(flag);
ocelot_port_bridge_flags(ocelot, port, flags);
}
static void ocelot_clear_brport_flags(struct ocelot *ocelot, int port)
{
struct switchdev_brport_flags flags;
flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
flags.val = flags.mask & ~BR_LEARNING;
ocelot_port_bridge_flags(ocelot, port, flags);
}
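/* Replay the bridge port's existing configuration (port flags, STP state,
 * ageing time, VLAN filtering) into the switch when joining a bridge.
 */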
static int ocelot_switchdev_sync(struct ocelot *ocelot, int port,
struct net_device *brport_dev,
struct net_device *bridge_dev,
struct netlink_ext_ack *extack)
{
clock_t ageing_time;
u8 stp_state;
ocelot_inherit_brport_flags(ocelot, port, brport_dev);
stp_state = br_port_get_stp_state(brport_dev);
ocelot_bridge_stp_state_set(ocelot, port, stp_state);
ageing_time = br_get_ageing_time(bridge_dev);
ocelot_port_attr_ageing_set(ocelot, port, ageing_time);
return ocelot_port_vlan_filtering(ocelot, port,
br_vlan_enabled(bridge_dev),
extack);
}
static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port)
{
int err;
err = ocelot_port_vlan_filtering(ocelot, port, false, NULL);
if (err)
return err;
ocelot_clear_brport_flags(ocelot, port);
ocelot_bridge_stp_state_set(ocelot, port, BR_STATE_FORWARDING);
return 0;
}
static int ocelot_bridge_num_get(struct ocelot *ocelot,
const struct net_device *bridge_dev)
{
int bridge_num = ocelot_bridge_num_find(ocelot, bridge_dev);
if (bridge_num < 0) {
/* First port that offloads this bridge */
bridge_num = find_first_zero_bit(&ocelot->bridges,
ocelot->num_phys_ports);
set_bit(bridge_num, &ocelot->bridges);
}
return bridge_num;
}
static void ocelot_bridge_num_put(struct ocelot *ocelot,
const struct net_device *bridge_dev,
int bridge_num)
{
/* Check if the bridge is still in use, otherwise it is time
* to clean it up so we can reuse this bridge_num later.
*/
if (!ocelot_bridge_num_find(ocelot, bridge_dev))
clear_bit(bridge_num, &ocelot->bridges);
}
static int ocelot_netdevice_bridge_join(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
int bridge_num, err;
bridge_num = ocelot_bridge_num_get(ocelot, bridge);
err = ocelot_port_bridge_join(ocelot, port, bridge, bridge_num,
extack);
if (err)
goto err_join;
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
&ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb,
false, extack);
if (err)
goto err_switchdev_offload;
err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack);
if (err)
goto err_switchdev_sync;
return 0;
err_switchdev_sync:
switchdev_bridge_port_unoffload(brport_dev, priv,
&ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
err_join:
ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return err;
}
static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
struct net_device *brport_dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
switchdev_bridge_port_unoffload(brport_dev, priv,
&ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
}
static int ocelot_netdevice_bridge_leave(struct net_device *dev,
struct net_device *brport_dev,
struct net_device *bridge)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int bridge_num = ocelot_port->bridge_num;
int port = priv->port.index;
int err;
err = ocelot_switchdev_unsync(ocelot, port);
if (err)
return err;
ocelot_port_bridge_leave(ocelot, port, bridge);
ocelot_bridge_num_put(ocelot, bridge, bridge_num);
return 0;
}
static int ocelot_netdevice_lag_join(struct net_device *dev,
struct net_device *bond,
struct netdev_lag_upper_info *info,
struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct net_device *bridge_dev;
int port = priv->port.index;
int err;
err = ocelot_port_lag_join(ocelot, port, bond, info, extack);
if (err == -EOPNOTSUPP)
/* Offloading not supported, fall back to software LAG */
return 0;
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return 0;
err = ocelot_netdevice_bridge_join(dev, bond, bridge_dev, extack);
if (err)
goto err_bridge_join;
return 0;
err_bridge_join:
ocelot_port_lag_leave(ocelot, port, bond);
return err;
}
static void ocelot_netdevice_pre_lag_leave(struct net_device *dev,
struct net_device *bond)
{
struct net_device *bridge_dev;
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return;
ocelot_netdevice_pre_bridge_leave(dev, bond);
}
static int ocelot_netdevice_lag_leave(struct net_device *dev,
struct net_device *bond)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
struct net_device *bridge_dev;
int port = priv->port.index;
ocelot_port_lag_leave(ocelot, port, bond);
bridge_dev = netdev_master_upper_dev_get(bond);
if (!bridge_dev || !netif_is_bridge_master(bridge_dev))
return 0;
return ocelot_netdevice_bridge_leave(dev, bond, bridge_dev);
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
int err = 0;
extack = netdev_notifier_info_to_extack(&info->info);
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
if (info->linking)
err = ocelot_netdevice_lag_join(dev, info->upper_dev,
info->upper_info, extack);
else
ocelot_netdevice_lag_leave(dev, info->upper_dev);
}
return notifier_from_errno(err);
}
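/* Illustrative note, not part of the original file: the handlers above return
 * notifier codes rather than plain errnos. notifier_from_errno(0) maps to
 * NOTIFY_OK, while a negative errno is encoded roughly as
 *
 *	NOTIFY_STOP_MASK | (NOTIFY_OK - err)
 *
 * so that the caller of the notifier chain can recover the original error
 * with notifier_to_errno() and report it (e.g. back to the bridge layer).
 */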
/* Treat CHANGEUPPER events on an offloaded LAG as individual CHANGEUPPER
* events for the lower physical ports of the LAG.
* If the LAG upper isn't offloaded, ignore its CHANGEUPPER events.
* In case the LAG joined a bridge, notify that we are offloading it and can do
* forwarding in hardware towards it.
*/
static int
ocelot_netdevice_lag_changeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
int err = NOTIFY_DONE;
netdev_for_each_lower_dev(dev, lower, iter) {
struct ocelot_port_private *priv = netdev_priv(lower);
struct ocelot_port *ocelot_port = &priv->port;
if (ocelot_port->bond != dev)
return NOTIFY_OK;
err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
static int
ocelot_netdevice_prechangeupper(struct net_device *dev,
struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
if (netif_is_bridge_master(info->upper_dev) && !info->linking)
ocelot_netdevice_pre_bridge_leave(dev, brport_dev);
if (netif_is_lag_master(info->upper_dev) && !info->linking)
ocelot_netdevice_pre_lag_leave(dev, info->upper_dev);
return NOTIFY_DONE;
}
static int
ocelot_netdevice_lag_prechangeupper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct net_device *lower;
struct list_head *iter;
int err = NOTIFY_DONE;
netdev_for_each_lower_dev(dev, lower, iter) {
struct ocelot_port_private *priv = netdev_priv(lower);
struct ocelot_port *ocelot_port = &priv->port;
if (ocelot_port->bond != dev)
return NOTIFY_OK;
err = ocelot_netdevice_prechangeupper(dev, lower, info);
if (err)
return err;
}
return NOTIFY_DONE;
}
static int
ocelot_netdevice_changelowerstate(struct net_device *dev,
struct netdev_lag_lower_state_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
bool is_active = info->link_up && info->tx_enabled;
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->port.index;
if (!ocelot_port->bond)
return NOTIFY_DONE;
if (ocelot_port->lag_tx_active == is_active)
return NOTIFY_DONE;
ocelot_port_lag_change(ocelot, port, is_active);
return NOTIFY_OK;
}
static int ocelot_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
switch (event) {
case NETDEV_PRECHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
return ocelot_netdevice_prechangeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_prechangeupper(dev, info);
break;
}
case NETDEV_CHANGEUPPER: {
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);
break;
}
case NETDEV_CHANGELOWERSTATE: {
struct netdev_notifier_changelowerstate_info *info = ptr;
if (!ocelot_netdevice_dev_check(dev))
break;
return ocelot_netdevice_changelowerstate(dev,
info->lower_state_info);
}
default:
break;
}
return NOTIFY_DONE;
}
struct notifier_block ocelot_netdevice_nb __read_mostly = {
.notifier_call = ocelot_netdevice_event,
};
static int ocelot_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
ocelot_netdevice_dev_check,
ocelot_port_attr_set);
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
struct notifier_block ocelot_switchdev_nb __read_mostly = {
.notifier_call = ocelot_switchdev_event,
};
static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
int err;
switch (event) {
/* Blocking events. */
case SWITCHDEV_PORT_OBJ_ADD:
err = switchdev_handle_port_obj_add(dev, ptr,
ocelot_netdevice_dev_check,
ocelot_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
err = switchdev_handle_port_obj_del(dev, ptr,
ocelot_netdevice_dev_check,
ocelot_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
ocelot_netdevice_dev_check,
ocelot_port_attr_set);
return notifier_from_errno(err);
}
return NOTIFY_DONE;
}
struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
.notifier_call = ocelot_switchdev_blocking_event,
};
static void vsc7514_phylink_mac_config(struct phylink_config *config,
unsigned int link_an_mode,
const struct phylink_link_state *state)
{
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_phylink_mac_config(ocelot, port, link_an_mode, state);
}
static void vsc7514_phylink_mac_link_down(struct phylink_config *config,
unsigned int link_an_mode,
phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
OCELOT_MAC_QUIRKS);
}
static void vsc7514_phylink_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct ocelot_port_private *priv = netdev_priv(ndev);
struct ocelot *ocelot = priv->port.ocelot;
int port = priv->port.index;
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex,
tx_pause, rx_pause, OCELOT_MAC_QUIRKS);
}
static const struct phylink_mac_ops ocelot_phylink_ops = {
.mac_config = vsc7514_phylink_mac_config,
.mac_link_down = vsc7514_phylink_mac_link_down,
.mac_link_up = vsc7514_phylink_mac_link_up,
};
static int ocelot_port_phylink_create(struct ocelot *ocelot, int port,
struct device_node *portnp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
struct ocelot_port_private *priv;
struct device *dev = ocelot->dev;
phy_interface_t phy_mode;
struct phylink *phylink;
int err;
of_get_phy_mode(portnp, &phy_mode);
/* DT bindings of internal PHY ports are broken and don't
* specify a phy-mode
*/
if (phy_mode == PHY_INTERFACE_MODE_NA)
phy_mode = PHY_INTERFACE_MODE_INTERNAL;
if (phy_mode != PHY_INTERFACE_MODE_SGMII &&
phy_mode != PHY_INTERFACE_MODE_QSGMII &&
phy_mode != PHY_INTERFACE_MODE_INTERNAL) {
dev_err(dev, "unsupported phy mode %s for port %d\n",
phy_modes(phy_mode), port);
return -EINVAL;
}
ocelot_port->phy_mode = phy_mode;
err = ocelot_port_configure_serdes(ocelot, port, portnp);
if (err)
return err;
priv = container_of(ocelot_port, struct ocelot_port_private, port);
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
__set_bit(ocelot_port->phy_mode,
priv->phylink_config.supported_interfaces);
phylink = phylink_create(&priv->phylink_config,
of_fwnode_handle(portnp),
phy_mode, &ocelot_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
dev_err(dev, "Could not create phylink (%pe)\n", phylink);
return err;
}
priv->phylink = phylink;
err = phylink_of_phy_connect(phylink, portnp, 0);
if (err) {
dev_err(dev, "Could not connect to PHY: %pe\n", ERR_PTR(err));
phylink_destroy(phylink);
priv->phylink = NULL;
return err;
}
return 0;
}
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct device_node *portnp)
{
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct net_device *dev;
int err;
dev = alloc_etherdev(sizeof(struct ocelot_port_private));
if (!dev)
return -ENOMEM;
SET_NETDEV_DEV(dev, ocelot->dev);
priv = netdev_priv(dev);
priv->dev = dev;
ocelot_port = &priv->port;
ocelot_port->ocelot = ocelot;
ocelot_port->index = port;
ocelot_port->target = target;
ocelot->ports[port] = ocelot_port;
dev->netdev_ops = &ocelot_port_netdev_ops;
dev->ethtool_ops = &ocelot_ethtool_ops;
dev->max_mtu = OCELOT_JUMBO_MTU;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
err = of_get_ethdev_address(portnp, dev);
if (err)
eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
OCELOT_STANDALONE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
err = ocelot_port_phylink_create(ocelot, port, portnp);
if (err)
goto out;
if (ocelot->fdma)
ocelot_fdma_netdev_init(ocelot, dev);
SET_NETDEV_DEVLINK_PORT(dev, &ocelot->devlink_ports[port]);
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
goto out_fdma_deinit;
}
return 0;
out_fdma_deinit:
if (ocelot->fdma)
ocelot_fdma_netdev_deinit(ocelot, dev);
out:
ocelot->ports[port] = NULL;
free_netdev(dev);
return err;
}
void ocelot_release_port(struct ocelot_port *ocelot_port)
{
struct ocelot_port_private *priv = container_of(ocelot_port,
struct ocelot_port_private,
port);
struct ocelot *ocelot = ocelot_port->ocelot;
struct ocelot_fdma *fdma = ocelot->fdma;
unregister_netdev(priv->dev);
if (fdma)
ocelot_fdma_netdev_deinit(ocelot, priv->dev);
if (priv->phylink) {
rtnl_lock();
phylink_disconnect_phy(priv->phylink);
rtnl_unlock();
phylink_destroy(priv->phylink);
}
free_netdev(priv->dev);
}
| linux-master | drivers/net/ethernet/mscc/ocelot_net.c |
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017 Microsemi Corporation
* Copyright (c) 2021 Innovative Advantage
*/
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot.h"
const struct reg_field vsc7514_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
[ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
[ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
[ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
[ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
[ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
[ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
[ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
[ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
[ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
[ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
[ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
[ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
[ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
[ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
[ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
[ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
[ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
[ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
[ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
[ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
[ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
[ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
[ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
[ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
[ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
[ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
[ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
[ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
[ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
[ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
[ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
[ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
[QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
[QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
[QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
[QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
[QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
[SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
[SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
[SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
/* Replicated per number of ports (12), register size 4 per port */
[QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
[QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
[QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
[QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
[QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
[SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
[SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
[SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
[SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
[SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
[SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
};
EXPORT_SYMBOL(vsc7514_regfields);
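/* Illustrative note, not part of the original file: fields declared with
 * REG_FIELD_ID(reg, lsb, msb, id_size, id_offset) above describe id_size
 * per-port copies of the same bitfield, spaced id_offset bytes apart, so
 * instance 'port' of such a field resolves to reg + port * id_offset.
 * Assuming the generic regmap_fields helpers are used to access them,
 * PORT_ENA for port 3 sits at QSYS_SWITCH_PORT_MODE + 3 * 4 and could be
 * set with something like:
 *
 *	regmap_fields_write(port_ena_field, 3, 1);
 */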
static const u32 vsc7514_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
REG(ANA_PORT_B_DOMAIN, 0x009008),
REG(ANA_ANAGEFIL, 0x00900c),
REG(ANA_ANEVENTS, 0x009010),
REG(ANA_STORMLIMIT_BURST, 0x009014),
REG(ANA_STORMLIMIT_CFG, 0x009018),
REG(ANA_ISOLATED_PORTS, 0x009028),
REG(ANA_COMMUNITY_PORTS, 0x00902c),
REG(ANA_AUTOAGE, 0x009030),
REG(ANA_MACTOPTIONS, 0x009034),
REG(ANA_LEARNDISC, 0x009038),
REG(ANA_AGENCTRL, 0x00903c),
REG(ANA_MIRRORPORTS, 0x009040),
REG(ANA_EMIRRORPORTS, 0x009044),
REG(ANA_FLOODING, 0x009048),
REG(ANA_FLOODING_IPMC, 0x00904c),
REG(ANA_SFLOW_CFG, 0x009050),
REG(ANA_PORT_MODE, 0x009080),
REG(ANA_PGID_PGID, 0x008c00),
REG(ANA_TABLES_ANMOVED, 0x008b30),
REG(ANA_TABLES_MACHDATA, 0x008b34),
REG(ANA_TABLES_MACLDATA, 0x008b38),
REG(ANA_TABLES_MACACCESS, 0x008b3c),
REG(ANA_TABLES_MACTINDX, 0x008b40),
REG(ANA_TABLES_VLANACCESS, 0x008b44),
REG(ANA_TABLES_VLANTIDX, 0x008b48),
REG(ANA_TABLES_ISDXACCESS, 0x008b4c),
REG(ANA_TABLES_ISDXTIDX, 0x008b50),
REG(ANA_TABLES_ENTRYLIM, 0x008b00),
REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54),
REG(ANA_TABLES_PTP_ID_LOW, 0x008b58),
REG(ANA_MSTI_STATE, 0x008e00),
REG(ANA_PORT_VLAN_CFG, 0x007000),
REG(ANA_PORT_DROP_CFG, 0x007004),
REG(ANA_PORT_QOS_CFG, 0x007008),
REG(ANA_PORT_VCAP_CFG, 0x00700c),
REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010),
REG(ANA_PORT_VCAP_S2_CFG, 0x00701c),
REG(ANA_PORT_PCP_DEI_MAP, 0x007020),
REG(ANA_PORT_CPU_FWD_CFG, 0x007060),
REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064),
REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068),
REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c),
REG(ANA_PORT_PORT_CFG, 0x007070),
REG(ANA_PORT_POL_CFG, 0x007074),
REG(ANA_PORT_PTP_CFG, 0x007078),
REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c),
REG(ANA_OAM_UPM_LM_CNT, 0x007c00),
REG(ANA_PORT_PTP_DLY2_CFG, 0x007080),
REG(ANA_PFC_PFC_CFG, 0x008800),
REG(ANA_PFC_PFC_TIMER, 0x008804),
REG(ANA_IPT_OAM_MEP_CFG, 0x008000),
REG(ANA_IPT_IPT, 0x008004),
REG(ANA_PPT_PPT, 0x008ac0),
REG(ANA_FID_MAP_FID_MAP, 0x000000),
REG(ANA_AGGR_CFG, 0x0090b4),
REG(ANA_CPUQ_CFG, 0x0090b8),
REG(ANA_CPUQ_CFG2, 0x0090bc),
REG(ANA_CPUQ_8021_CFG, 0x0090c0),
REG(ANA_DSCP_CFG, 0x009100),
REG(ANA_DSCP_REWR_CFG, 0x009200),
REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240),
REG(ANA_VCAP_RNG_VAL_CFG, 0x009260),
REG(ANA_VRAP_CFG, 0x009280),
REG(ANA_VRAP_HDR_DATA, 0x009284),
REG(ANA_VRAP_HDR_MASK, 0x009288),
REG(ANA_DISCARD_CFG, 0x00928c),
REG(ANA_FID_CFG, 0x009290),
REG(ANA_POL_PIR_CFG, 0x004000),
REG(ANA_POL_CIR_CFG, 0x004004),
REG(ANA_POL_MODE_CFG, 0x004008),
REG(ANA_POL_PIR_STATE, 0x00400c),
REG(ANA_POL_CIR_STATE, 0x004010),
REG(ANA_POL_STATE, 0x004014),
REG(ANA_POL_FLOWC, 0x008b80),
REG(ANA_POL_HYST, 0x008bec),
REG(ANA_POL_MISC_CFG, 0x008bf0),
};
static const u32 vsc7514_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
REG(QS_XTR_FLUSH, 0x000018),
REG(QS_XTR_DATA_PRESENT, 0x00001c),
REG(QS_XTR_CFG, 0x000020),
REG(QS_INJ_GRP_CFG, 0x000024),
REG(QS_INJ_WR, 0x00002c),
REG(QS_INJ_CTRL, 0x000034),
REG(QS_INJ_STATUS, 0x00003c),
REG(QS_INJ_ERR, 0x000040),
REG(QS_INH_DBG, 0x000048),
};
static const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
REG(QSYS_STAT_CNT_CFG, 0x011264),
REG(QSYS_EEE_CFG, 0x011268),
REG(QSYS_EEE_THRES, 0x011294),
REG(QSYS_IGR_NO_SHARING, 0x011298),
REG(QSYS_EGR_NO_SHARING, 0x01129c),
REG(QSYS_SW_STATUS, 0x0112a0),
REG(QSYS_EXT_CPU_CFG, 0x0112d0),
REG(QSYS_PAD_CFG, 0x0112d4),
REG(QSYS_CPU_GROUP_MAP, 0x0112d8),
REG(QSYS_QMAP, 0x0112dc),
REG(QSYS_ISDX_SGRP, 0x011400),
REG(QSYS_TIMED_FRAME_ENTRY, 0x014000),
REG(QSYS_TFRM_MISC, 0x011310),
REG(QSYS_TFRM_PORT_DLY, 0x011314),
REG(QSYS_TFRM_TIMER_CFG_1, 0x011318),
REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c),
REG(QSYS_TFRM_TIMER_CFG_3, 0x011320),
REG(QSYS_TFRM_TIMER_CFG_4, 0x011324),
REG(QSYS_TFRM_TIMER_CFG_5, 0x011328),
REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c),
REG(QSYS_TFRM_TIMER_CFG_7, 0x011330),
REG(QSYS_TFRM_TIMER_CFG_8, 0x011334),
REG(QSYS_RED_PROFILE, 0x011338),
REG(QSYS_RES_QOS_MODE, 0x011378),
REG(QSYS_RES_CFG, 0x012000),
REG(QSYS_RES_STAT, 0x012004),
REG(QSYS_EGR_DROP_MODE, 0x01137c),
REG(QSYS_EQ_CTRL, 0x011380),
REG(QSYS_EVENTS_CORE, 0x011384),
REG(QSYS_CIR_CFG, 0x000000),
REG(QSYS_EIR_CFG, 0x000004),
REG(QSYS_SE_CFG, 0x000008),
REG(QSYS_SE_DWRR_CFG, 0x00000c),
REG(QSYS_SE_CONNECT, 0x00003c),
REG(QSYS_SE_DLB_SENSE, 0x000040),
REG(QSYS_CIR_STATE, 0x000044),
REG(QSYS_EIR_STATE, 0x000048),
REG(QSYS_SE_STATE, 0x00004c),
REG(QSYS_HSCH_MISC_CFG, 0x011388),
};
static const u32 vsc7514_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
REG(REW_DSCP_CFG, 0x00000c),
REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010),
REG(REW_PTP_CFG, 0x000050),
REG(REW_PTP_DLY1_CFG, 0x000054),
REG(REW_DSCP_REMAP_DP1_CFG, 0x000690),
REG(REW_DSCP_REMAP_CFG, 0x000790),
REG(REW_STAT_CFG, 0x000890),
REG(REW_PPT, 0x000680),
};
static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
REG(SYS_COUNT_RX_256_511, 0x000030),
REG(SYS_COUNT_RX_512_1023, 0x000034),
REG(SYS_COUNT_RX_1024_1526, 0x000038),
REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
REG(SYS_COUNT_RX_PAUSE, 0x000040),
REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
REG(SYS_COUNT_TX_UNICAST, 0x000104),
REG(SYS_COUNT_TX_MULTICAST, 0x000108),
REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
REG(SYS_COUNT_TX_128_255, 0x000124),
REG(SYS_COUNT_TX_256_511, 0x000128),
REG(SYS_COUNT_TX_512_1023, 0x00012c),
REG(SYS_COUNT_TX_1024_1526, 0x000130),
REG(SYS_COUNT_TX_1527_MAX, 0x000134),
REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
REG(SYS_COUNT_TX_AGED, 0x000178),
REG(SYS_COUNT_DROP_LOCAL, 0x000200),
REG(SYS_COUNT_DROP_TAIL, 0x000204),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000508),
REG(SYS_CMID, 0x00050c),
REG(SYS_VLAN_ETYPE_CFG, 0x000510),
REG(SYS_PORT_MODE, 0x000514),
REG(SYS_FRONT_PORT_MODE, 0x000548),
REG(SYS_FRM_AGING, 0x000574),
REG(SYS_STAT_CFG, 0x000578),
REG(SYS_SW_STATUS, 0x00057c),
REG(SYS_MISC_CFG, 0x0005ac),
REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0),
REG(SYS_REW_MAC_LOW_CFG, 0x0005dc),
REG(SYS_CM_ADDR, 0x000500),
REG(SYS_CM_DATA, 0x000504),
REG(SYS_PAUSE_CFG, 0x000608),
REG(SYS_PAUSE_TOT_CFG, 0x000638),
REG(SYS_ATOP, 0x00063c),
REG(SYS_ATOP_TOT_CFG, 0x00066c),
REG(SYS_MAC_FC_CFG, 0x000670),
REG(SYS_MMGT, 0x00069c),
REG(SYS_MMGT_FAST, 0x0006a0),
REG(SYS_EVENTS_DIF, 0x0006a4),
REG(SYS_EVENTS_CORE, 0x0006b4),
REG(SYS_PTP_STATUS, 0x0006b8),
REG(SYS_PTP_TXSTAMP, 0x0006bc),
REG(SYS_PTP_NXT, 0x0006c0),
REG(SYS_PTP_CFG, 0x0006c4),
};
static const u32 vsc7514_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
/* VCAP_CORE_CACHE */
REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
REG(VCAP_CACHE_MASK_DAT, 0x000108),
REG(VCAP_CACHE_ACTION_DAT, 0x000208),
REG(VCAP_CACHE_CNT_DAT, 0x000308),
REG(VCAP_CACHE_TG_DAT, 0x000388),
/* VCAP_CONST */
REG(VCAP_CONST_VCAP_VER, 0x000398),
REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
REG(VCAP_CONST_CORE_CNT, 0x0003b8),
REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 vsc7514_ptp_regmap[] = {
REG(PTP_PIN_CFG, 0x000000),
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
REG(PTP_PIN_TOD_NSEC, 0x00000c),
REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
REG(PTP_CFG_MISC, 0x0000a0),
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
static const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG(DEV_EVENTS, 0x8),
REG(DEV_EEE_CFG, 0xc),
REG(DEV_RX_PATH_DELAY, 0x10),
REG(DEV_TX_PATH_DELAY, 0x14),
REG(DEV_PTP_PREDICT_CFG, 0x18),
REG(DEV_MAC_ENA_CFG, 0x1c),
REG(DEV_MAC_MODE_CFG, 0x20),
REG(DEV_MAC_MAXLEN_CFG, 0x24),
REG(DEV_MAC_TAGS_CFG, 0x28),
REG(DEV_MAC_ADV_CHK_CFG, 0x2c),
REG(DEV_MAC_IFG_CFG, 0x30),
REG(DEV_MAC_HDX_CFG, 0x34),
REG(DEV_MAC_DBG_CFG, 0x38),
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
REG(PCS1G_CFG, 0x48),
REG(PCS1G_MODE_CFG, 0x4c),
REG(PCS1G_SD_CFG, 0x50),
REG(PCS1G_ANEG_CFG, 0x54),
REG(PCS1G_ANEG_NP_CFG, 0x58),
REG(PCS1G_LB_CFG, 0x5c),
REG(PCS1G_DBG_CFG, 0x60),
REG(PCS1G_CDET_CFG, 0x64),
REG(PCS1G_ANEG_STATUS, 0x68),
REG(PCS1G_ANEG_NP_STATUS, 0x6c),
REG(PCS1G_LINK_STATUS, 0x70),
REG(PCS1G_LINK_DOWN_CNT, 0x74),
REG(PCS1G_STICKY, 0x78),
REG(PCS1G_DEBUG_STATUS, 0x7c),
REG(PCS1G_LPI_CFG, 0x80),
REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84),
REG(PCS1G_LPI_STATUS, 0x88),
REG(PCS1G_TSTPAT_MODE_CFG, 0x8c),
REG(PCS1G_TSTPAT_STATUS, 0x90),
REG(DEV_PCS_FX100_CFG, 0x94),
REG(DEV_PCS_FX100_STATUS, 0x98),
};
const u32 *vsc7514_regmap[TARGET_MAX] = {
[ANA] = vsc7514_ana_regmap,
[QS] = vsc7514_qs_regmap,
[QSYS] = vsc7514_qsys_regmap,
[REW] = vsc7514_rew_regmap,
[SYS] = vsc7514_sys_regmap,
[S0] = vsc7514_vcap_regmap,
[S1] = vsc7514_vcap_regmap,
[S2] = vsc7514_vcap_regmap,
[PTP] = vsc7514_ptp_regmap,
[DEV_GMII] = vsc7514_dev_gmii_regmap,
};
EXPORT_SYMBOL(vsc7514_regmap);
static const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4 },
[VCAP_ES0_IGR_PORT] = { 4, 4 },
[VCAP_ES0_RSV] = { 8, 2 },
[VCAP_ES0_L2_MC] = { 10, 1 },
[VCAP_ES0_L2_BC] = { 11, 1 },
[VCAP_ES0_VID] = { 12, 12 },
[VCAP_ES0_DP] = { 24, 1 },
[VCAP_ES0_PCP] = { 25, 3 },
};
static const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
[VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1 },
[VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2 },
[VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2 },
[VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2 },
[VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1 },
[VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2 },
[VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2 },
[VCAP_ES0_ACT_VID_A_VAL] = { 17, 12 },
[VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3 },
[VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1 },
[VCAP_ES0_ACT_VID_B_VAL] = { 33, 12 },
[VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3 },
[VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1 },
[VCAP_ES0_ACT_RSV] = { 49, 24 },
[VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
};
static const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1 },
[VCAP_IS1_HK_LOOKUP] = { 1, 2 },
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
[VCAP_IS1_HK_RSV] = { 15, 9 },
[VCAP_IS1_HK_OAM_Y1731] = { 24, 1 },
[VCAP_IS1_HK_L2_MC] = { 25, 1 },
[VCAP_IS1_HK_L2_BC] = { 26, 1 },
[VCAP_IS1_HK_IP_MC] = { 27, 1 },
[VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1 },
[VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1 },
[VCAP_IS1_HK_TPID] = { 30, 1 },
[VCAP_IS1_HK_VID] = { 31, 12 },
[VCAP_IS1_HK_DEI] = { 43, 1 },
[VCAP_IS1_HK_PCP] = { 44, 3 },
/* Specific Fields for IS1 Half Key S1_NORMAL */
[VCAP_IS1_HK_L2_SMAC] = { 47, 48 },
[VCAP_IS1_HK_ETYPE_LEN] = { 95, 1 },
[VCAP_IS1_HK_ETYPE] = { 96, 16 },
[VCAP_IS1_HK_IP_SNAP] = { 112, 1 },
[VCAP_IS1_HK_IP4] = { 113, 1 },
/* Layer-3 Information */
[VCAP_IS1_HK_L3_FRAGMENT] = { 114, 1 },
[VCAP_IS1_HK_L3_FRAG_OFS_GT0] = { 115, 1 },
[VCAP_IS1_HK_L3_OPTIONS] = { 116, 1 },
[VCAP_IS1_HK_L3_DSCP] = { 117, 6 },
[VCAP_IS1_HK_L3_IP4_SIP] = { 123, 32 },
/* Layer-4 Information */
[VCAP_IS1_HK_TCP_UDP] = { 155, 1 },
[VCAP_IS1_HK_TCP] = { 156, 1 },
[VCAP_IS1_HK_L4_SPORT] = { 157, 16 },
[VCAP_IS1_HK_L4_RNG] = { 173, 8 },
/* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
[VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1 },
[VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12 },
[VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1 },
[VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3 },
[VCAP_IS1_HK_IP4_IP4] = { 64, 1 },
[VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1 },
[VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1 },
[VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1 },
[VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6 },
[VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32 },
[VCAP_IS1_HK_IP4_L3_IP4_SIP] = { 106, 32 },
[VCAP_IS1_HK_IP4_L3_PROTO] = { 138, 8 },
[VCAP_IS1_HK_IP4_TCP_UDP] = { 146, 1 },
[VCAP_IS1_HK_IP4_TCP] = { 147, 1 },
[VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
};
static const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
[VCAP_IS1_ACT_QOS_VAL] = { 8, 3 },
[VCAP_IS1_ACT_DP_ENA] = { 11, 1 },
[VCAP_IS1_ACT_DP_VAL] = { 12, 1 },
[VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8 },
[VCAP_IS1_ACT_PAG_VAL] = { 21, 8 },
[VCAP_IS1_ACT_RSV] = { 29, 9 },
/* The fields below are incorrectly shifted by 2 in the manual */
[VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1 },
[VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12 },
[VCAP_IS1_ACT_FID_SEL] = { 51, 2 },
[VCAP_IS1_ACT_FID_VAL] = { 53, 13 },
[VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1 },
[VCAP_IS1_ACT_PCP_VAL] = { 67, 3 },
[VCAP_IS1_ACT_DEI_VAL] = { 70, 1 },
[VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1 },
[VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2 },
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
[VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
};
static const struct vcap_field vsc7514_vcap_is2_keys[] = {
/* Common: 46 bits */
[VCAP_IS2_TYPE] = { 0, 4 },
[VCAP_IS2_HK_FIRST] = { 4, 1 },
[VCAP_IS2_HK_PAG] = { 5, 8 },
[VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 12 },
[VCAP_IS2_HK_RSV2] = { 25, 1 },
[VCAP_IS2_HK_HOST_MATCH] = { 26, 1 },
[VCAP_IS2_HK_L2_MC] = { 27, 1 },
[VCAP_IS2_HK_L2_BC] = { 28, 1 },
[VCAP_IS2_HK_VLAN_TAGGED] = { 29, 1 },
[VCAP_IS2_HK_VID] = { 30, 12 },
[VCAP_IS2_HK_DEI] = { 42, 1 },
[VCAP_IS2_HK_PCP] = { 43, 3 },
/* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
[VCAP_IS2_HK_L2_DMAC] = { 46, 48 },
[VCAP_IS2_HK_L2_SMAC] = { 94, 48 },
/* MAC_ETYPE (TYPE=000) */
[VCAP_IS2_HK_MAC_ETYPE_ETYPE] = { 142, 16 },
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = { 158, 16 },
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = { 174, 8 },
[VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = { 182, 3 },
/* MAC_LLC (TYPE=001) */
[VCAP_IS2_HK_MAC_LLC_L2_LLC] = { 142, 40 },
/* MAC_SNAP (TYPE=010) */
[VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = { 142, 40 },
/* MAC_ARP (TYPE=011) */
[VCAP_IS2_HK_MAC_ARP_SMAC] = { 46, 48 },
[VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 94, 1 },
[VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 95, 1 },
[VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 96, 1 },
[VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 97, 1 },
[VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 98, 1 },
[VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 99, 1 },
[VCAP_IS2_HK_MAC_ARP_OPCODE] = { 100, 2 },
[VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = { 102, 32 },
[VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = { 134, 32 },
[VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = { 166, 1 },
/* IP4_TCP_UDP / IP4_OTHER common */
[VCAP_IS2_HK_IP4] = { 46, 1 },
[VCAP_IS2_HK_L3_FRAGMENT] = { 47, 1 },
[VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 48, 1 },
[VCAP_IS2_HK_L3_OPTIONS] = { 49, 1 },
[VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 50, 1 },
[VCAP_IS2_HK_L3_TOS] = { 51, 8 },
[VCAP_IS2_HK_L3_IP4_DIP] = { 59, 32 },
[VCAP_IS2_HK_L3_IP4_SIP] = { 91, 32 },
[VCAP_IS2_HK_DIP_EQ_SIP] = { 123, 1 },
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = { 124, 1 },
[VCAP_IS2_HK_L4_DPORT] = { 125, 16 },
[VCAP_IS2_HK_L4_SPORT] = { 141, 16 },
[VCAP_IS2_HK_L4_RNG] = { 157, 8 },
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = { 165, 1 },
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = { 166, 1 },
[VCAP_IS2_HK_L4_FIN] = { 167, 1 },
[VCAP_IS2_HK_L4_SYN] = { 168, 1 },
[VCAP_IS2_HK_L4_RST] = { 169, 1 },
[VCAP_IS2_HK_L4_PSH] = { 170, 1 },
[VCAP_IS2_HK_L4_ACK] = { 171, 1 },
[VCAP_IS2_HK_L4_URG] = { 172, 1 },
[VCAP_IS2_HK_L4_1588_DOM] = { 173, 8 },
[VCAP_IS2_HK_L4_1588_VER] = { 181, 4 },
/* IP4_OTHER (TYPE=101) */
[VCAP_IS2_HK_IP4_L3_PROTO] = { 124, 8 },
[VCAP_IS2_HK_L3_PAYLOAD] = { 132, 56 },
/* IP6_STD (TYPE=110) */
[VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 46, 1 },
[VCAP_IS2_HK_L3_IP6_SIP] = { 47, 128 },
[VCAP_IS2_HK_IP6_L3_PROTO] = { 175, 8 },
/* OAM (TYPE=111) */
[VCAP_IS2_HK_OAM_MEL_FLAGS] = { 142, 7 },
[VCAP_IS2_HK_OAM_VER] = { 149, 5 },
[VCAP_IS2_HK_OAM_OPCODE] = { 154, 8 },
[VCAP_IS2_HK_OAM_FLAGS] = { 162, 8 },
[VCAP_IS2_HK_OAM_MEPID] = { 170, 16 },
[VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
[VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
};
static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
[VCAP_IS2_ACT_MASK_MODE] = { 5, 2 },
[VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1 },
[VCAP_IS2_ACT_LRN_DIS] = { 8, 1 },
[VCAP_IS2_ACT_POLICE_ENA] = { 9, 1 },
[VCAP_IS2_ACT_POLICE_IDX] = { 10, 9 },
[VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 19, 1 },
[VCAP_IS2_ACT_PORT_MASK] = { 20, 11 },
[VCAP_IS2_ACT_REW_OP] = { 31, 9 },
[VCAP_IS2_ACT_SMAC_REPLACE_ENA] = { 40, 1 },
[VCAP_IS2_ACT_RSV] = { 41, 2 },
[VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
};
struct vcap_props vsc7514_vcap_props[] = {
[VCAP_ES0] = {
.action_type_width = 0,
.action_table = {
[ES0_ACTION_TYPE_NORMAL] = {
.width = 73, /* HIT_STICKY not included */
.count = 1,
},
},
.target = S0,
.keys = vsc7514_vcap_es0_keys,
.actions = vsc7514_vcap_es0_actions,
},
[VCAP_IS1] = {
.action_type_width = 0,
.action_table = {
[IS1_ACTION_TYPE_NORMAL] = {
.width = 78, /* HIT_STICKY not included */
.count = 4,
},
},
.target = S1,
.keys = vsc7514_vcap_is1_keys,
.actions = vsc7514_vcap_is1_actions,
},
[VCAP_IS2] = {
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
.width = 49,
.count = 2
},
[IS2_ACTION_TYPE_SMAC_SIP] = {
.width = 6,
.count = 4
},
},
.target = S2,
.keys = vsc7514_vcap_is2_keys,
.actions = vsc7514_vcap_is2_actions,
},
};
EXPORT_SYMBOL(vsc7514_vcap_props);
| linux-master | drivers/net/ethernet/mscc/vsc7514_regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDC R6040 Fast Ethernet MAC support
*
* Copyright (C) 2004 Sten Wang <[email protected]>
* Copyright (C) 2007
* Daniel Gimpelevich <[email protected]>
* Copyright (C) 2007-2012 Florian Fainelli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/phy.h>
#include <asm/processor.h>
#define DRV_NAME "r6040"
#define DRV_VERSION "0.29"
#define DRV_RELDATE "04Jul2016"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
/* RDC MAC I/O Size */
#define R6040_IO_SIZE 256
/* MAX RDC MAC */
#define MAX_MAC 2
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
#define MCR0_RCVEN 0x0002 /* Receive enable */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR0_XMTEN 0x1000 /* Transmission enable */
#define MCR0_FD 0x8000 /* Full/Half duplex */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
#define MT_ICR 0x0C /* TX interrupt control */
#define MR_ICR 0x10 /* RX interrupt control */
#define MTPR 0x14 /* TX poll command register */
#define TM2TX 0x0001 /* Trigger MAC to transmit */
#define MR_BSR 0x18 /* RX buffer size */
#define MR_DCR 0x1A /* RX descriptor control */
#define MLSR 0x1C /* Last status */
#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
#define TX_LATEC 0x4000 /* Transmit late collision */
#define MMDIO 0x20 /* MDIO control register */
#define MDIO_WRITE 0x4000 /* MDIO write */
#define MDIO_READ 0x2000 /* MDIO read */
#define MMRD 0x24 /* MDIO read data register */
#define MMWD 0x28 /* MDIO write data register */
#define MTD_SA0 0x2C /* TX descriptor start address 0 */
#define MTD_SA1 0x30 /* TX descriptor start address 1 */
#define MRD_SA0 0x34 /* RX descriptor start address 0 */
#define MRD_SA1 0x38 /* RX descriptor start address 1 */
#define MISR 0x3C /* Status register */
#define MIER 0x40 /* INT enable register */
#define MSK_INT 0x0000 /* Mask off interrupts */
#define RX_FINISH 0x0001 /* RX finished */
#define RX_NO_DESC 0x0002 /* No RX descriptor available */
#define RX_FIFO_FULL 0x0004 /* RX FIFO full */
#define RX_EARLY 0x0008 /* RX early */
#define TX_FINISH 0x0010 /* TX finished */
#define TX_EARLY 0x0080 /* TX early */
#define EVENT_OVRFL 0x0100 /* Event counter overflow */
#define LINK_CHANGED 0x0200 /* PHY link changed */
#define ME_CISR 0x44 /* Event counter INT status */
#define ME_CIER 0x48 /* Event counter INT enable */
#define MR_CNT 0x50 /* Successfully received packet counter */
#define ME_CNT0 0x52 /* Event counter 0 */
#define ME_CNT1 0x54 /* Event counter 1 */
#define ME_CNT2 0x56 /* Event counter 2 */
#define ME_CNT3 0x58 /* Event counter 3 */
#define MT_CNT 0x5A /* Successfully transmitted packet counter */
#define ME_CNT4 0x5C /* Event counter 4 */
#define MP_CNT 0x5E /* Pause frame counter register */
#define MAR0 0x60 /* Hash table 0 */
#define MAR1 0x62 /* Hash table 1 */
#define MAR2 0x64 /* Hash table 2 */
#define MAR3 0x66 /* Hash table 3 */
#define MID_0L 0x68 /* Multicast address MID0 Low */
#define MID_0M 0x6A /* Multicast address MID0 Medium */
#define MID_0H 0x6C /* Multicast address MID0 High */
#define MID_1L 0x70 /* MID1 Low */
#define MID_1M 0x72 /* MID1 Medium */
#define MID_1H 0x74 /* MID1 High */
#define MID_2L 0x78 /* MID2 Low */
#define MID_2M 0x7A /* MID2 Medium */
#define MID_2H 0x7C /* MID2 High */
#define MID_3L 0x80 /* MID3 Low */
#define MID_3M 0x82 /* MID3 Medium */
#define MID_3H 0x84 /* MID3 High */
#define PHY_CC 0x88 /* PHY status change configuration register */
#define SCEN 0x8000 /* PHY status change enable */
#define PHYAD_SHIFT 8 /* PHY address shift */
#define TMRDIV_SHIFT 0 /* Timer divider shift */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MD_CSC 0xb6 /* MDC speed control register */
#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
#define RX_DCNT 0x80 /* RX descriptor count */
#define MAX_BUF_SIZE 0x600
#define RX_DESC_SIZE (RX_DCNT * sizeof(struct r6040_descriptor))
#define TX_DESC_SIZE (TX_DCNT * sizeof(struct r6040_descriptor))
#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
#define MCAST_MAX 3 /* Max number of multicast addresses to filter */
#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
/* Descriptor status */
#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
#define DSC_RX_OK 0x4000 /* RX was successful */
#define DSC_RX_ERR 0x0800 /* RX PHY error */
#define DSC_RX_ERR_DRI 0x0400 /* RX dribble packet */
#define DSC_RX_ERR_BUF 0x0200 /* RX length exceeds buffer size */
#define DSC_RX_ERR_LONG 0x0100 /* RX length > maximum packet length */
#define DSC_RX_ERR_RUNT 0x0080 /* RX packet length < 64 byte */
#define DSC_RX_ERR_CRC 0x0040 /* RX CRC error */
#define DSC_RX_BCAST 0x0020 /* RX broadcast (no error) */
#define DSC_RX_MCAST 0x0010 /* RX multicast (no error) */
#define DSC_RX_MCH_HIT 0x0008 /* RX multicast hit in hash table (no error) */
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
MODULE_AUTHOR("Sten Wang <[email protected]>,"
"Daniel Gimpelevich <[email protected]>,"
"Florian Fainelli <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
/* RX and TX interrupts that we handle */
#define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH)
#define TX_INTS (TX_FINISH)
#define INT_MASK (RX_INTS | TX_INTS)
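/* Illustrative note, not part of the original file: these masks split the
 * interrupt handling between the hard IRQ and NAPI. r6040_interrupt() masks
 * everything with MSK_INT while it runs and, when it schedules NAPI, restores
 * MIER with RX_INTS | TX_INTS cleared; r6040_poll() then reaps TX completions,
 * receives up to 'budget' frames, and re-enables RX_INTS | TX_INTS in MIER
 * once it completes under budget.
 */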
struct r6040_descriptor {
u16 status, len; /* 0-3 */
__le32 buf; /* 4-7 */
__le32 ndesc; /* 8-B */
u32 rev1; /* C-F */
char *vbufp; /* 10-13 */
struct r6040_descriptor *vndescp; /* 14-17 */
struct sk_buff *skb_ptr; /* 18-1B */
u32 rev2; /* 1C-1F */
} __aligned(32);
struct r6040_private {
spinlock_t lock; /* driver lock */
struct pci_dev *pdev;
struct r6040_descriptor *rx_insert_ptr;
struct r6040_descriptor *rx_remove_ptr;
struct r6040_descriptor *tx_insert_ptr;
struct r6040_descriptor *tx_remove_ptr;
struct r6040_descriptor *rx_ring;
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
u16 tx_free_desc;
u16 mcr0;
struct net_device *dev;
struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
int old_link;
int old_duplex;
};
static char version[] = DRV_NAME
	": RDC R6040 NAPI net driver, "
	"version " DRV_VERSION " (" DRV_RELDATE ")";
/* Read a word of data from the PHY chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
int limit = MAC_DEF_TIMEOUT;
u16 cmd;
iowrite16(MDIO_READ | reg | (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the read bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_READ))
break;
udelay(1);
}
if (limit < 0)
return -ETIMEDOUT;
return ioread16(ioaddr + MMRD);
}
/* Write a word of data to the PHY chip */
static int r6040_phy_write(void __iomem *ioaddr,
int phy_addr, int reg, u16 val)
{
int limit = MAC_DEF_TIMEOUT;
u16 cmd;
iowrite16(val, ioaddr + MMWD);
/* Write the command to the MDIO bus */
iowrite16(MDIO_WRITE | reg | (phy_addr << 8), ioaddr + MMDIO);
/* Wait for the write bit to be cleared */
while (limit--) {
cmd = ioread16(ioaddr + MMDIO);
if (!(cmd & MDIO_WRITE))
break;
udelay(1);
}
return (limit < 0) ? -ETIMEDOUT : 0;
}
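/* Illustrative note, not part of the original file: both helpers above build
 * the MMDIO command word as op | reg | (phy_addr << 8), where op is MDIO_READ
 * (0x2000) or MDIO_WRITE (0x4000), then poll until the hardware clears the op
 * bit. For example, reading register 2 of the PHY at address 1 writes
 *
 *	0x2000 | 0x0002 | (1 << 8) = 0x2102
 *
 * to MMDIO and, once bit 13 clears, fetches the result from MMRD.
 */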
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
{
struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
return r6040_phy_read(ioaddr, phy_addr, reg);
}
static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
int reg, u16 value)
{
struct net_device *dev = bus->priv;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
static void r6040_free_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int i;
for (i = 0; i < TX_DCNT; i++) {
if (lp->tx_insert_ptr->skb_ptr) {
dma_unmap_single(&lp->pdev->dev,
le32_to_cpu(lp->tx_insert_ptr->buf),
MAX_BUF_SIZE, DMA_TO_DEVICE);
dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
lp->tx_insert_ptr->skb_ptr = NULL;
}
lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
}
}
static void r6040_free_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int i;
for (i = 0; i < RX_DCNT; i++) {
if (lp->rx_insert_ptr->skb_ptr) {
dma_unmap_single(&lp->pdev->dev,
le32_to_cpu(lp->rx_insert_ptr->buf),
MAX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
lp->rx_insert_ptr->skb_ptr = NULL;
}
lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp;
}
}
static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
dma_addr_t desc_dma, int size)
{
struct r6040_descriptor *desc = desc_ring;
dma_addr_t mapping = desc_dma;
while (size-- > 0) {
mapping += sizeof(*desc);
desc->ndesc = cpu_to_le32(mapping);
desc->vndescp = desc + 1;
desc++;
}
desc--;
desc->ndesc = cpu_to_le32(desc_dma);
desc->vndescp = desc_ring;
}
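/* Illustrative note, not part of the original file: r6040_init_ring_desc()
 * links the descriptors into a circular ring. With ring base DMA address
 * desc_dma and 32-byte descriptors (per the offset comments in struct
 * r6040_descriptor, which assume the 32-bit RDC platforms this driver
 * targets), descriptor i ends up with
 *
 *	ndesc   = desc_dma + (i + 1) * sizeof(struct r6040_descriptor)
 *	vndescp = &desc_ring[i + 1]
 *
 * and the last descriptor is rewritten to point back at desc_dma/desc_ring,
 * so the RX/TX paths can simply keep following vndescp.
 */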
static void r6040_init_txbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
lp->tx_free_desc = TX_DCNT;
lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring;
r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT);
}
static int r6040_alloc_rxbufs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct r6040_descriptor *desc;
struct sk_buff *skb;
int rc;
lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring;
r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT);
/* Allocate skbs for the rx descriptors */
desc = lp->rx_ring;
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
rc = -ENOMEM;
goto err_exit;
}
desc->skb_ptr = skb;
desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev,
desc->skb_ptr->data,
MAX_BUF_SIZE,
DMA_FROM_DEVICE));
desc->status = DSC_OWNER_MAC;
desc = desc->vndescp;
} while (desc != lp->rx_ring);
return 0;
err_exit:
/* Deallocate all previously allocated skbs */
r6040_free_rxbufs(dev);
return rc;
}
static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
u16 cmd, md_csc;
md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
if (cmd & MAC_RST)
break;
}
/* Reset internal state machine */
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
/* Restore MDIO clock frequency */
if (md_csc != MD_CSC_DEFAULT)
iowrite16(md_csc, ioaddr + MD_CSC);
}
static void r6040_init_mac_regs(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
/* Mask Off Interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Reset RDC MAC */
r6040_reset_mac(lp);
/* MAC Bus Control Register */
iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
/* Buffer Size Register */
iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR);
/* Write TX ring start address */
iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0);
iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1);
/* Write RX ring start address */
iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0);
iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1);
/* Set interrupt waiting time and packet numbers */
iowrite16(0, ioaddr + MT_ICR);
iowrite16(0, ioaddr + MR_ICR);
/* Enable interrupts */
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
/* Let TX poll the descriptors;
 * we may have been called by r6040_tx_timeout, which has left
 * some unsent TX buffers behind */
iowrite16(TM2TX, ioaddr + MTPR);
}
static void r6040_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
netdev_warn(dev, "transmit timed out, int enable %4.4x "
"status %4.4x\n",
ioread16(ioaddr + MIER),
ioread16(ioaddr + MISR));
dev->stats.tx_errors++;
/* Reset MAC and re-init all registers */
r6040_init_mac_regs(dev);
}
static struct net_device_stats *r6040_get_stats(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
void __iomem *ioaddr = priv->base;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1);
dev->stats.multicast += ioread8(ioaddr + ME_CNT0);
spin_unlock_irqrestore(&priv->lock, flags);
return &dev->stats;
}
/* Stop the RDC MAC and free the allocated resources */
static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
const u16 *adrp;
/* Stop MAC */
iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
/* Reset RDC MAC */
r6040_reset_mac(lp);
/* Restore MAC Address to MIDx */
adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
}
static int r6040_close(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pdev;
phy_stop(dev->phydev);
napi_disable(&lp->napi);
netif_stop_queue(dev);
spin_lock_irq(&lp->lock);
r6040_down(dev);
/* Free RX buffer */
r6040_free_rxbufs(dev);
/* Free TX buffer */
r6040_free_txbufs(dev);
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
/* Free Descriptor memory */
if (lp->rx_ring) {
dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring,
lp->rx_ring_dma);
lp->rx_ring = NULL;
}
if (lp->tx_ring) {
dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring,
lp->tx_ring_dma);
lp->tx_ring = NULL;
}
return 0;
}
static int r6040_rx(struct net_device *dev, int limit)
{
struct r6040_private *priv = netdev_priv(dev);
struct r6040_descriptor *descptr = priv->rx_remove_ptr;
struct sk_buff *skb_ptr, *new_skb;
int count = 0;
u16 err;
/* Limit not reached and the descriptor belongs to the CPU */
while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
/* Read the descriptor status */
err = descptr->status;
/* Global error status set */
if (err & DSC_RX_ERR) {
/* RX dribble */
if (err & DSC_RX_ERR_DRI)
dev->stats.rx_frame_errors++;
/* Buffer length exceeded */
if (err & DSC_RX_ERR_BUF)
dev->stats.rx_length_errors++;
/* Packet too long */
if (err & DSC_RX_ERR_LONG)
dev->stats.rx_length_errors++;
/* Packet < 64 bytes */
if (err & DSC_RX_ERR_RUNT)
dev->stats.rx_length_errors++;
/* CRC error */
if (err & DSC_RX_ERR_CRC) {
spin_lock(&priv->lock);
dev->stats.rx_crc_errors++;
spin_unlock(&priv->lock);
}
goto next_descr;
}
/* Packet successfully received */
new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!new_skb) {
dev->stats.rx_dropped++;
goto next_descr;
}
skb_ptr = descptr->skb_ptr;
skb_ptr->dev = priv->dev;
/* Do not count the CRC */
skb_put(skb_ptr, descptr->len - ETH_FCS_LEN);
dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
MAX_BUF_SIZE, DMA_FROM_DEVICE);
skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
/* Send to upper layer */
netif_receive_skb(skb_ptr);
dev->stats.rx_packets++;
dev->stats.rx_bytes += descptr->len - ETH_FCS_LEN;
/* put new skb into descriptor */
descptr->skb_ptr = new_skb;
descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev,
descptr->skb_ptr->data,
MAX_BUF_SIZE,
DMA_FROM_DEVICE));
next_descr:
/* put the descriptor back to the MAC */
descptr->status = DSC_OWNER_MAC;
descptr = descptr->vndescp;
count++;
}
priv->rx_remove_ptr = descptr;
return count;
}
static void r6040_tx(struct net_device *dev)
{
struct r6040_private *priv = netdev_priv(dev);
struct r6040_descriptor *descptr;
void __iomem *ioaddr = priv->base;
struct sk_buff *skb_ptr;
u16 err;
spin_lock(&priv->lock);
descptr = priv->tx_remove_ptr;
while (priv->tx_free_desc < TX_DCNT) {
/* Check for errors */
err = ioread16(ioaddr + MLSR);
if (err & TX_FIFO_UNDR)
dev->stats.tx_fifo_errors++;
if (err & (TX_EXCEEDC | TX_LATEC))
dev->stats.tx_carrier_errors++;
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
/* Statistic Counter */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb_ptr->len;
dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
skb_ptr->len, DMA_TO_DEVICE);
/* Free buffer */
dev_kfree_skb(skb_ptr);
descptr->skb_ptr = NULL;
/* To next descriptor */
descptr = descptr->vndescp;
priv->tx_free_desc++;
}
priv->tx_remove_ptr = descptr;
if (priv->tx_free_desc)
netif_wake_queue(dev);
spin_unlock(&priv->lock);
}
static int r6040_poll(struct napi_struct *napi, int budget)
{
struct r6040_private *priv =
container_of(napi, struct r6040_private, napi);
struct net_device *dev = priv->dev;
void __iomem *ioaddr = priv->base;
int work_done;
r6040_tx(dev);
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
/* Enable RX/TX interrupt */
iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
ioaddr + MIER);
}
return work_done;
}
/* The RDC interrupt handler. */
static irqreturn_t r6040_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
u16 misr, status;
/* Save MIER */
misr = ioread16(ioaddr + MIER);
/* Mask off RDC MAC interrupt */
iowrite16(MSK_INT, ioaddr + MIER);
/* Read MISR status and clear */
status = ioread16(ioaddr + MISR);
if (status == 0x0000 || status == 0xffff) {
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_NONE;
}
/* RX interrupt request */
if (status & (RX_INTS | TX_INTS)) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
dev->stats.rx_missed_errors++;
}
if (status & RX_FIFO_FULL)
dev->stats.rx_fifo_errors++;
if (likely(napi_schedule_prep(&lp->napi))) {
/* Mask off RX interrupt */
misr &= ~(RX_INTS | TX_INTS);
__napi_schedule_irqoff(&lp->napi);
}
}
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void r6040_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
r6040_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
/* Init RDC MAC */
static int r6040_up(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
int ret;
/* Initialise and alloc RX/TX buffers */
r6040_init_txbufs(dev);
ret = r6040_alloc_rxbufs(dev);
if (ret)
return ret;
/* improve performance (by RDC guys) */
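/* a sketch of what the writes below appear to do: the first sets bit 14 and
 * the second clears bit 13 of register 17 on PHY address 30; the meaning of
 * these vendor-specific bits is not documented
 */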
r6040_phy_write(ioaddr, 30, 17,
(r6040_phy_read(ioaddr, 30, 17) | 0x4000));
r6040_phy_write(ioaddr, 30, 17,
~((~r6040_phy_read(ioaddr, 30, 17)) | 0x2000));
r6040_phy_write(ioaddr, 0, 19, 0x0000);
r6040_phy_write(ioaddr, 0, 30, 0x01F0);
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
phy_start(dev->phydev);
return 0;
}
/* Read/set MAC address routines */
static void r6040_mac_address(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
const u16 *adrp;
/* Reset MAC */
r6040_reset_mac(lp);
/* Restore MAC Address */
adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
}
static int r6040_open(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
int ret;
/* Request IRQ and Register interrupt handler */
ret = request_irq(dev->irq, r6040_interrupt,
IRQF_SHARED, dev->name, dev);
if (ret)
goto out;
/* Set MAC address */
r6040_mac_address(dev);
/* Allocate Descriptor memory */
lp->rx_ring =
dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE,
&lp->rx_ring_dma, GFP_KERNEL);
if (!lp->rx_ring) {
ret = -ENOMEM;
goto err_free_irq;
}
lp->tx_ring =
dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE,
&lp->tx_ring_dma, GFP_KERNEL);
if (!lp->tx_ring) {
ret = -ENOMEM;
goto err_free_rx_ring;
}
ret = r6040_up(dev);
if (ret)
goto err_free_tx_ring;
napi_enable(&lp->napi);
netif_start_queue(dev);
return 0;
err_free_tx_ring:
dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring,
lp->tx_ring_dma);
err_free_rx_ring:
dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring,
lp->rx_ring_dma);
err_free_irq:
free_irq(dev->irq, dev);
out:
return ret;
}
static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct r6040_descriptor *descptr;
void __iomem *ioaddr = lp->base;
unsigned long flags;
if (skb_put_padto(skb, ETH_ZLEN) < 0)
return NETDEV_TX_OK;
/* Critical Section */
spin_lock_irqsave(&lp->lock, flags);
/* TX resource check */
if (!lp->tx_free_desc) {
spin_unlock_irqrestore(&lp->lock, flags);
netif_stop_queue(dev);
netdev_err(dev, ": no tx descriptor\n");
return NETDEV_TX_BUSY;
}
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE));
descptr->status = DSC_OWNER_MAC;
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
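/* defer the doorbell while more frames are pending (xmit_more),
 * unless the queue has just been stopped
 */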
if (!netdev_xmit_more() || netif_queue_stopped(dev))
iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
/* If no tx resource, stop */
if (!lp->tx_free_desc)
netif_stop_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
static void r6040_multicast_list(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
const u16 *adrp;
u16 hash_table[4] = { 0 };
spin_lock_irqsave(&lp->lock, flags);
/* Keep our MAC Address */
adrp = (const u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
/* Clear AMCP & PROM bits */
lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
/* Promiscuous mode */
if (dev->flags & IFF_PROMISC)
lp->mcr0 |= MCR0_PROMISC;
/* Enable multicast hash table function to
* receive all multicast packets. */
else if (dev->flags & IFF_ALLMULTI) {
lp->mcr0 |= MCR0_HASH_EN;
for (i = 0; i < MCAST_MAX ; i++) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
}
for (i = 0; i < 4; i++)
hash_table[i] = 0xffff;
}
/* Use internal multicast address registers if the number of
* multicast addresses is not greater than MCAST_MAX. */
else if (netdev_mc_count(dev) <= MCAST_MAX) {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
u16 *adrp = (u16 *) ha->addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
i++;
}
while (i < MCAST_MAX) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
i++;
}
}
/* Otherwise, Enable multicast hash table function. */
else {
u32 crc;
lp->mcr0 |= MCR0_HASH_EN;
for (i = 0; i < MCAST_MAX ; i++) {
iowrite16(0, ioaddr + MID_1L + 8 * i);
iowrite16(0, ioaddr + MID_1M + 8 * i);
iowrite16(0, ioaddr + MID_1H + 8 * i);
}
/* Build multicast hash table */
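/* the upper 6 bits of the CRC select one bit of the 64-bit hash:
 * bits 5:4 choose one of the four 16-bit MAR registers and
 * bits 3:0 choose the bit within that register
 */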
netdev_for_each_mc_addr(ha, dev) {
u8 *addrs = ha->addr;
crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
}
iowrite16(lp->mcr0, ioaddr + MCR0);
/* Fill the MAC hash tables with their values */
if (lp->mcr0 & MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
iowrite16(hash_table[3], ioaddr + MAR3);
}
spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct r6040_private *rp = netdev_priv(dev);
strscpy(info->driver, DRV_NAME, sizeof(info->driver));
strscpy(info->version, DRV_VERSION, sizeof(info->version));
strscpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
.nway_reset = phy_ethtool_nway_reset,
};
static const struct net_device_ops r6040_netdev_ops = {
.ndo_open = r6040_open,
.ndo_stop = r6040_close,
.ndo_start_xmit = r6040_start_xmit,
.ndo_get_stats = r6040_get_stats,
.ndo_set_rx_mode = r6040_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl,
.ndo_tx_timeout = r6040_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = r6040_poll_controller,
#endif
};
static void r6040_adjust_link(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
int status_changed = 0;
void __iomem *ioaddr = lp->base;
BUG_ON(!phydev);
if (lp->old_link != phydev->link) {
status_changed = 1;
lp->old_link = phydev->link;
}
/* reflect duplex change */
if (phydev->link && (lp->old_duplex != phydev->duplex)) {
lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
iowrite16(lp->mcr0, ioaddr);
status_changed = 1;
lp->old_duplex = phydev->duplex;
}
if (status_changed)
phy_print_status(phydev);
}
static int r6040_mii_probe(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
dev_err(&lp->pdev->dev, "no PHY found\n");
return -ENODEV;
}
phydev = phy_connect(dev, phydev_name(phydev), &r6040_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&lp->pdev->dev, "could not attach to PHY\n");
return PTR_ERR(phydev);
}
phy_set_max_speed(phydev, SPEED_100);
lp->old_link = 0;
lp->old_duplex = -1;
phy_attached_info(phydev);
return 0;
}
static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct r6040_private *lp;
void __iomem *ioaddr;
int err, io_size = R6040_IO_SIZE;
static int card_idx = -1;
u16 addr[ETH_ALEN / 2];
int bar = 0;
pr_info("%s\n", version);
err = pci_enable_device(pdev);
if (err)
goto err_out;
/* this should always be supported */
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
goto err_out_disable_dev;
}
pci_set_master(pdev);
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
err = -ENOMEM;
goto err_out_disable_dev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Failed to request PCI regions\n");
goto err_out_free_dev;
}
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
dev_err(&pdev->dev, "ioremap failed for device\n");
err = -EIO;
goto err_out_free_res;
}
/* If PHY status change register is still set to zero it means the
* bootloader didn't initialize it, so we set it to:
* - enable phy status change
* - enable all phy addresses
* - set to lowest timer divider */
if (ioread16(ioaddr + PHY_CC) == 0)
iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
/* Init system & device */
lp->base = ioaddr;
dev->irq = pdev->irq;
spin_lock_init(&lp->lock);
pci_set_drvdata(pdev, dev);
/* Set MAC address */
card_idx++;
addr[0] = ioread16(ioaddr + MID_0L);
addr[1] = ioread16(ioaddr + MID_0M);
addr[2] = ioread16(ioaddr + MID_0H);
eth_hw_addr_set(dev, (u8 *)addr);
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
if (!(addr[0] || addr[1] || addr[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
eth_hw_addr_random(dev);
}
/* Link new device into r6040_root_dev */
lp->pdev = pdev;
lp->dev = dev;
/* Init RDC private data */
lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, r6040_poll);
lp->mii_bus = mdiobus_alloc();
if (!lp->mii_bus) {
dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
err = -ENOMEM;
goto err_out_unmap;
}
lp->mii_bus->priv = dev;
lp->mii_bus->read = r6040_mdiobus_read;
lp->mii_bus->write = r6040_mdiobus_write;
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
err = mdiobus_register(lp->mii_bus);
if (err) {
dev_err(&pdev->dev, "failed to register MII bus\n");
goto err_out_mdio;
}
err = r6040_mii_probe(dev);
if (err) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
goto err_out_mdio_unregister;
}
/* Register the net device. After this point dev->name is assigned. */
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Failed to register net device\n");
goto err_out_phy_disconnect;
}
return 0;
err_out_phy_disconnect:
phy_disconnect(dev->phydev);
err_out_mdio_unregister:
mdiobus_unregister(lp->mii_bus);
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
netif_napi_del(&lp->napi);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
err_out_disable_dev:
pci_disable_device(pdev);
err_out:
return err;
}
static void r6040_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct r6040_private *lp = netdev_priv(dev);
unregister_netdev(dev);
phy_disconnect(dev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
netif_napi_del(&lp->napi);
pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
}
static const struct pci_device_id r6040_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, r6040_pci_tbl);
static struct pci_driver r6040_driver = {
.name = DRV_NAME,
.id_table = r6040_pci_tbl,
.probe = r6040_init_one,
.remove = r6040_remove_one,
};
module_pci_driver(r6040_driver);
|
linux-master
|
drivers/net/ethernet/rdc/r6040.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
#include "tsnep.h"
void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
{
u32 high_before;
u32 low;
u32 high;
/* read high dword twice to detect overrun */
high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
do {
low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
high_before = high;
high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
} while (high != high_before);
*time = (((u64)high) << 32) | ((u64)low);
}
int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct hwtstamp_config config;
if (!ifr)
return -EINVAL;
if (cmd == SIOCSHWTSTAMP) {
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
memcpy(&adapter->hwtstamp_config, &config,
sizeof(adapter->hwtstamp_config));
}
if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
sizeof(adapter->hwtstamp_config)))
return -EFAULT;
return 0;
}
static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
ptp_clock_info);
bool negative = false;
u64 rate_offset;
if (scaled_ppm < 0) {
scaled_ppm = -scaled_ppm;
negative = true;
}
/* convert from 16 bit to 32 bit binary fractional, divide by 1000000 to
 * eliminate ppm and multiply by 8 to compensate for the 8 ns clock cycle
 * time; the calculation simplifies because 15625 * 8 = 1000000 / 8
 */
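/* worked example (assumed interpretation of the register units):
 * scaled_ppm = 65536 (1 ppm) yields (65536 << 13) / 15625 = 34359,
 * i.e. roughly 8e-6 ns per 8 ns clock cycle expressed in 2^-32 ns units
 */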
rate_offset = scaled_ppm;
rate_offset <<= 16 - 3;
rate_offset = div_u64(rate_offset, 15625);
rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
if (negative)
rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
return 0;
}
static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
ptp_clock_info);
u64 system_time;
unsigned long flags;
spin_lock_irqsave(&adapter->ptp_lock, flags);
tsnep_get_system_time(adapter, &system_time);
system_time += delta;
/* high dword is buffered in hardware and synchronously written to
* system time when low dword is written
*/
iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
iowrite32(system_time & 0xFFFFFFFF,
adapter->addr + ECM_SYSTEM_TIME_LOW);
spin_unlock_irqrestore(&adapter->ptp_lock, flags);
return 0;
}
static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
ptp_clock_info);
u32 high_before;
u32 low;
u32 high;
u64 system_time;
/* read high dword twice to detect overrun */
high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
do {
ptp_read_system_prets(sts);
low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
ptp_read_system_postts(sts);
high_before = high;
high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
} while (high != high_before);
system_time = (((u64)high) << 32) | ((u64)low);
*ts = ns_to_timespec64(system_time);
return 0;
}
static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
ptp_clock_info);
u64 system_time = timespec64_to_ns(ts);
unsigned long flags;
spin_lock_irqsave(&adapter->ptp_lock, flags);
/* high dword is buffered in hardware and synchronously written to
* system time when low dword is written
*/
iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
iowrite32(system_time & 0xFFFFFFFF,
adapter->addr + ECM_SYSTEM_TIME_LOW);
spin_unlock_irqrestore(&adapter->ptp_lock, flags);
return 0;
}
static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp,
struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
ptp_clock_info);
u32 high_before;
u32 low;
u32 high;
u64 counter;
/* read high dword twice to detect overrun */
high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
do {
ptp_read_system_prets(sts);
low = ioread32(adapter->addr + ECM_COUNTER_LOW);
ptp_read_system_postts(sts);
high_before = high;
high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
} while (high != high_before);
counter = (((u64)high) << 32) | ((u64)low);
*ts = ns_to_timespec64(counter);
return 0;
}
int tsnep_ptp_init(struct tsnep_adapter *adapter)
{
int retval = 0;
adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
adapter->ptp_clock_info.owner = THIS_MODULE;
/* at most 2^-1ns adjustment every clock cycle for 8ns clock cycle time,
* stay slightly below because only bits below 2^-1ns are supported
*/
adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64;
spin_lock_init(&adapter->ptp_lock);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
netdev_err(adapter->netdev, "ptp_clock_register failed\n");
retval = PTR_ERR(adapter->ptp_clock);
adapter->ptp_clock = NULL;
} else if (adapter->ptp_clock) {
netdev_info(adapter->netdev, "PHC added\n");
}
return retval;
}
void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
{
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
netdev_info(adapter->netdev, "PHC removed\n");
}
}
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_ptp.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
/* TSN endpoint Ethernet MAC driver
*
* The TSN endpoint Ethernet MAC is a FPGA based network device for real-time
* communication. It is designed for endpoints within TSN (Time Sensitive
* Networking) networks; e.g., for PLCs in the industrial automation case.
*
* It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
* by the driver.
*
* More information can be found here:
* - www.embedded-experts.at/tsn
* - www.engleder-embedded.com
*/
#include "tsnep.h"
#include "tsnep_hw.h"
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/xdp_sock_drv.h>
#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* XSK buffer shall store at least Q-in-Q frame */
#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
ETH_FRAME_LEN + ETH_FCS_LEN + \
VLAN_HLEN * 2, 4))
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
#define TSNEP_COALESCE_USECS_DEFAULT 64
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
#define TSNEP_TX_TYPE_SKB BIT(0)
#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
#define TSNEP_TX_TYPE_XDP_TX BIT(2)
#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
#define TSNEP_TX_TYPE_XSK BIT(4)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}
static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
mask |= ECM_INT_DISABLE;
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}
static irqreturn_t tsnep_irq(int irq, void *arg)
{
struct tsnep_adapter *adapter = arg;
u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
/* acknowledge interrupt */
if (active != 0)
iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
/* handle link interrupt */
if ((active & ECM_INT_LINK) != 0)
phy_mac_interrupt(adapter->netdev->phydev);
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
if (napi_schedule_prep(&adapter->queue[0].napi)) {
tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
/* schedule after masking to avoid races */
__napi_schedule(&adapter->queue[0].napi);
}
}
return IRQ_HANDLED;
}
static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
struct tsnep_queue *queue = arg;
/* handle TX/RX queue interrupt */
if (napi_schedule_prep(&queue->napi)) {
tsnep_disable_irq(queue->adapter, queue->irq_mask);
/* schedule after masking to avoid races */
__napi_schedule(&queue->napi);
}
return IRQ_HANDLED;
}
int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
{
if (usecs > TSNEP_COALESCE_USECS_MAX)
return -ERANGE;
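/* the delay field holds the coalesce time in units of ECM_INT_DELAY_BASE_US,
 * placed at ECM_INT_DELAY_SHIFT within the register
 */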
usecs /= ECM_INT_DELAY_BASE_US;
usecs <<= ECM_INT_DELAY_SHIFT;
usecs &= ECM_INT_DELAY_MASK;
queue->irq_delay &= ~ECM_INT_DELAY_MASK;
queue->irq_delay |= usecs;
iowrite8(queue->irq_delay, queue->irq_delay_addr);
return 0;
}
u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
{
u32 usecs;
usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
usecs >>= ECM_INT_DELAY_SHIFT;
usecs *= ECM_INT_DELAY_BASE_US;
return usecs;
}
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
u32 md;
int retval;
md = ECM_MD_READ;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
iowrite32(md, adapter->addr + ECM_MD_CONTROL);
retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
!(md & ECM_MD_BUSY), 16, 1000);
if (retval != 0)
return retval;
return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}
static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
u16 val)
{
struct tsnep_adapter *adapter = bus->priv;
u32 md;
int retval;
md = ECM_MD_WRITE;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
iowrite32(md, adapter->addr + ECM_MD_CONTROL);
retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
!(md & ECM_MD_BUSY), 16, 1000);
if (retval != 0)
return retval;
return 0;
}
static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
u32 mode;
switch (adapter->phydev->speed) {
case SPEED_100:
mode = ECM_LINK_MODE_100;
break;
case SPEED_1000:
mode = ECM_LINK_MODE_1000;
break;
default:
mode = ECM_LINK_MODE_OFF;
break;
}
iowrite32(mode, adapter->addr + ECM_STATUS);
}
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
if (phydev->link)
tsnep_set_link_mode(adapter);
phy_print_status(netdev->phydev);
}
static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
int retval;
retval = phy_loopback(adapter->phydev, enable);
/* A PHY link state change is not signaled while loopback is enabled and
 * would only delay a working loopback anyway, so ensure that loopback
 * works immediately by setting the link mode directly.
 */
if (!retval && enable)
tsnep_set_link_mode(adapter);
return retval;
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
struct ethtool_eee ethtool_eee;
int retval;
retval = phy_connect_direct(adapter->netdev, adapter->phydev,
tsnep_phy_link_status_change,
adapter->phy_mode);
if (retval)
return retval;
phydev = adapter->netdev->phydev;
/* MAC supports only 100Mbps|1000Mbps full duplex
* SPE (Single Pair Ethernet) is also an option but not implemented yet
*/
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
/* disable EEE autoneg, EEE not supported by TSNEP */
memset(ðtool_eee, 0, sizeof(ethtool_eee));
phy_ethtool_set_eee(adapter->phydev, ðtool_eee);
adapter->phydev->irq = PHY_MAC_INTERRUPT;
phy_start(adapter->phydev);
return 0;
}
static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
phy_stop(adapter->netdev->phydev);
phy_disconnect(adapter->netdev->phydev);
}
static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
struct device *dmadev = tx->adapter->dmadev;
int i;
memset(tx->entry, 0, sizeof(tx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
if (tx->page[i]) {
dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
tx->page_dma[i]);
tx->page[i] = NULL;
tx->page_dma[i] = 0;
}
}
}
static int tsnep_tx_ring_create(struct tsnep_tx *tx)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
struct tsnep_tx_entry *next_entry;
int i, j;
int retval;
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
tx->page[i] =
dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
GFP_KERNEL);
if (!tx->page[i]) {
retval = -ENOMEM;
goto alloc_failed;
}
for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
entry->desc_wb = (struct tsnep_tx_desc_wb *)
(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
entry->desc = (struct tsnep_tx_desc *)
(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
entry->owner_user_flag = false;
}
}
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &tx->entry[i];
next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
}
return 0;
alloc_failed:
tsnep_tx_ring_cleanup(tx);
return retval;
}
static void tsnep_tx_init(struct tsnep_tx *tx)
{
dma_addr_t dma;
dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
tx->write = 0;
tx->read = 0;
tx->owner_counter = 1;
tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}
static void tsnep_tx_enable(struct tsnep_tx *tx)
{
struct netdev_queue *nq;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
__netif_tx_lock_bh(nq);
netif_tx_wake_queue(nq);
__netif_tx_unlock_bh(nq);
}
static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
{
struct netdev_queue *nq;
u32 val;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
__netif_tx_lock_bh(nq);
netif_tx_stop_queue(nq);
__netif_tx_unlock_bh(nq);
/* wait until TX is done in hardware */
readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
1000000);
/* wait until TX is also done in software */
while (READ_ONCE(tx->read) != tx->write) {
napi_schedule(napi);
napi_synchronize(napi);
}
}
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
bool last)
{
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
/* xdpf and zc are union with skb */
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
*
* Only the first fragment is acknowledged. For all other
* fragments no acknowledge is done and the last written owner
* counter stays in the writeback descriptor. Therefore, it is
* possible that the last written owner counter is identical to
* the new incremented owner counter and a false acknowledge is
* detected before the real acknowledge has been done by
* hardware.
*
* The user flag is used to prevent this situation. The user
* flag is copied to the writeback descriptor by the hardware
* and is used as additional acknowledge data. By toggling the
* user flag only for the first fragment (which is
* acknowledged), it is guaranteed that the last acknowledge
* done for this descriptor has used a different user flag and
* cannot be detected as false acknowledge.
*/
entry->owner_user_flag = !entry->owner_user_flag;
}
if (last)
entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
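/* the owner counter cycles through the values 1 to 3 and is advanced
 * approximately once per traversal of the ring
 */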
if (index == tx->increment_owner_counter) {
tx->owner_counter++;
if (tx->owner_counter == 4)
tx->owner_counter = 1;
tx->increment_owner_counter--;
if (tx->increment_owner_counter < 0)
tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}
entry->properties |=
(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
TSNEP_DESC_OWNER_COUNTER_MASK;
if (entry->owner_user_flag)
entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
entry->desc->more_properties =
__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
/* descriptor properties shall be written last, because valid data is
* signaled there
*/
dma_wmb();
entry->desc->properties = __cpu_to_le32(entry->properties);
}
static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
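/* one descriptor is always kept free, so read == write means an empty ring */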
if (tx->read <= tx->write)
return TSNEP_RING_SIZE - tx->write + tx->read - 1;
else
return tx->read - tx->write - 1;
}
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
unsigned int len;
dma_addr_t dma;
int map_len = 0;
int i;
for (i = 0; i < count; i++) {
entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
if (!i) {
len = skb_headlen(skb);
dma = dma_map_single(dmadev, skb->data, len,
DMA_TO_DEVICE);
entry->type = TSNEP_TX_TYPE_SKB;
} else {
len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
dma = skb_frag_dma_map(dmadev,
&skb_shinfo(skb)->frags[i - 1],
0, len, DMA_TO_DEVICE);
entry->type = TSNEP_TX_TYPE_SKB_FRAG;
}
if (dma_mapping_error(dmadev, dma))
return -ENOMEM;
entry->len = len;
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
map_len += len;
}
return map_len;
}
static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
int map_len = 0;
int i;
for (i = 0; i < count; i++) {
entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
if (entry->len) {
if (entry->type & TSNEP_TX_TYPE_SKB)
dma_unmap_single(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
else if (entry->type &
(TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
dma_unmap_page(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
map_len += entry->len;
entry->len = 0;
}
}
return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
int count = 1;
struct tsnep_tx_entry *entry;
int length;
int i;
int retval;
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
if (tsnep_tx_desc_available(tx) < count) {
/* ring full; this shall not happen, because the queue is stopped
 * below whenever it gets full
 */
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
return NETDEV_TX_BUSY;
}
entry = &tx->entry[tx->write];
entry->skb = skb;
retval = tsnep_tx_map(skb, tx, count);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
tx->dropped++;
return NETDEV_TX_OK;
}
length = retval;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
tx->write = (tx->write + count) & TSNEP_RING_MASK;
skb_tx_timestamp(skb);
/* descriptor properties shall be valid before hardware is notified */
dma_wmb();
iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
/* ring can get full with next frame */
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
}
return NETDEV_TX_OK;
}
static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
struct skb_shared_info *shinfo, int count, u32 type)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
struct page *page;
skb_frag_t *frag;
unsigned int len;
int map_len = 0;
dma_addr_t dma;
void *data;
int i;
frag = NULL;
len = xdpf->len;
for (i = 0; i < count; i++) {
entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
if (type & TSNEP_TX_TYPE_XDP_NDO) {
data = unlikely(frag) ? skb_frag_address(frag) :
xdpf->data;
dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dmadev, dma))
return -ENOMEM;
entry->type = TSNEP_TX_TYPE_XDP_NDO;
} else {
page = unlikely(frag) ? skb_frag_page(frag) :
virt_to_page(xdpf->data);
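/* for XDP_TX the frame sits in a page pool page; derive the DMA address
 * from the page and the offset of the frame data within it
 */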
dma = page_pool_get_dma_addr(page);
if (unlikely(frag))
dma += skb_frag_off(frag);
else
dma += sizeof(*xdpf) + xdpf->headroom;
dma_sync_single_for_device(dmadev, dma, len,
DMA_BIDIRECTIONAL);
entry->type = TSNEP_TX_TYPE_XDP_TX;
}
entry->len = len;
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
map_len += len;
if (i + 1 < count) {
frag = &shinfo->frags[i];
len = skb_frag_size(frag);
}
}
return map_len;
}
/* This function requires __netif_tx_lock is held by the caller. */
static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
struct tsnep_tx *tx, u32 type)
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
struct tsnep_tx_entry *entry;
int count, length, retval, i;
count = 1;
if (unlikely(xdp_frame_has_frags(xdpf)))
count += shinfo->nr_frags;
/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
* will be available for normal TX path and queue is stopped there if
* necessary
*/
if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
return false;
entry = &tx->entry[tx->write];
entry->xdpf = xdpf;
retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
entry->xdpf = NULL;
tx->dropped++;
return false;
}
length = retval;
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
i == count - 1);
tx->write = (tx->write + count) & TSNEP_RING_MASK;
/* descriptor properties shall be valid before hardware is notified */
dma_wmb();
return true;
}
static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
{
iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
}
static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
struct xdp_buff *xdp,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
bool xmit;
if (unlikely(!xdpf))
return false;
__netif_tx_lock(tx_nq, smp_processor_id());
xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
/* Avoid transmit queue timeout since we share it with the slow path */
if (xmit)
txq_trans_cond_update(tx_nq);
__netif_tx_unlock(tx_nq);
return xmit;
}
static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
{
struct tsnep_tx_entry *entry;
dma_addr_t dma;
entry = &tx->entry[tx->write];
entry->zc = true;
dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
entry->type = TSNEP_TX_TYPE_XSK;
entry->len = xdpd->len;
entry->desc->tx = __cpu_to_le64(dma);
return xdpd->len;
}
static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
struct tsnep_tx *tx)
{
int length;
length = tsnep_xdp_tx_map_zc(xdpd, tx);
tsnep_tx_activate(tx, tx->write, length, true);
tx->write = (tx->write + 1) & TSNEP_RING_MASK;
}
static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
{
int desc_available = tsnep_tx_desc_available(tx);
struct xdp_desc *descs = tx->xsk_pool->tx_descs;
int batch, i;
/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
* will be available for normal TX path and queue is stopped there if
* necessary
*/
if (desc_available <= (MAX_SKB_FRAGS + 1))
return;
desc_available -= MAX_SKB_FRAGS + 1;
batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
for (i = 0; i < batch; i++)
tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
if (batch) {
/* descriptor properties shall be valid before hardware is
* notified
*/
dma_wmb();
tsnep_xdp_xmit_flush(tx);
}
}
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
struct tsnep_tx_entry *entry;
struct netdev_queue *nq;
int xsk_frames = 0;
int budget = 128;
int length;
int count;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
__netif_tx_lock(nq, smp_processor_id());
do {
if (tx->read == tx->write)
break;
entry = &tx->entry[tx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_TX_DESC_OWNER_MASK) !=
(entry->properties & TSNEP_TX_DESC_OWNER_MASK))
break;
/* descriptor properties shall be read first, because valid data
* is signaled there
*/
dma_rmb();
count = 1;
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
xdp_frame_has_frags(entry->xdpf))
count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
length = tsnep_tx_unmap(tx, tx->read, count);
if ((entry->type & TSNEP_TX_TYPE_SKB) &&
(skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
u64 timestamp;
if (skb_shinfo(entry->skb)->tx_flags &
SKBTX_HW_TSTAMP_USE_CYCLES)
timestamp =
__le64_to_cpu(entry->desc_wb->counter);
else
timestamp =
__le64_to_cpu(entry->desc_wb->timestamp);
memset(&hwtstamps, 0, sizeof(hwtstamps));
hwtstamps.hwtstamp = ns_to_ktime(timestamp);
skb_tstamp_tx(entry->skb, &hwtstamps);
}
if (entry->type & TSNEP_TX_TYPE_SKB)
napi_consume_skb(entry->skb, napi_budget);
else if (entry->type & TSNEP_TX_TYPE_XDP)
xdp_return_frame_rx_napi(entry->xdpf);
else
xsk_frames++;
/* xdpf and zc are union with skb */
entry->skb = NULL;
tx->read = (tx->read + count) & TSNEP_RING_MASK;
tx->packets++;
tx->bytes += length + ETH_FCS_LEN;
budget--;
} while (likely(budget));
if (tx->xsk_pool) {
if (xsk_frames)
xsk_tx_completed(tx->xsk_pool, xsk_frames);
if (xsk_uses_need_wakeup(tx->xsk_pool))
xsk_set_tx_need_wakeup(tx->xsk_pool);
tsnep_xdp_xmit_zc(tx);
}
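/* wake the queue only if there is room for at least two maximally
 * fragmented frames
 */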
if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
netif_tx_queue_stopped(nq)) {
netif_tx_wake_queue(nq);
}
__netif_tx_unlock(nq);
return budget != 0;
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
struct tsnep_tx_entry *entry;
struct netdev_queue *nq;
bool pending = false;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
__netif_tx_lock(nq, smp_processor_id());
if (tx->read != tx->write) {
entry = &tx->entry[tx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_TX_DESC_OWNER_MASK) ==
(entry->properties & TSNEP_TX_DESC_OWNER_MASK))
pending = true;
}
__netif_tx_unlock(nq);
return pending;
}
static int tsnep_tx_open(struct tsnep_tx *tx)
{
int retval;
retval = tsnep_tx_ring_create(tx);
if (retval)
return retval;
tsnep_tx_init(tx);
return 0;
}
static void tsnep_tx_close(struct tsnep_tx *tx)
{
tsnep_tx_ring_cleanup(tx);
}
static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
int i;
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
if (!rx->xsk_pool && entry->page)
page_pool_put_full_page(rx->page_pool, entry->page,
false);
if (rx->xsk_pool && entry->xdp)
xsk_buff_free(entry->xdp);
/* xdp is union with page */
entry->page = NULL;
}
if (rx->page_pool)
page_pool_destroy(rx->page_pool);
memset(rx->entry, 0, sizeof(rx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
if (rx->page[i]) {
dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
rx->page_dma[i]);
rx->page[i] = NULL;
rx->page_dma[i] = 0;
}
}
}
static int tsnep_rx_ring_create(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
struct page_pool_params pp_params = { 0 };
struct tsnep_rx_entry *next_entry;
int i, j;
int retval;
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
rx->page[i] =
dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
GFP_KERNEL);
if (!rx->page[i]) {
retval = -ENOMEM;
goto failed;
}
for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
entry->desc_wb = (struct tsnep_rx_desc_wb *)
(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
entry->desc = (struct tsnep_rx_desc *)
(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
}
}
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.order = 0;
pp_params.pool_size = TSNEP_RING_SIZE;
pp_params.nid = dev_to_node(dmadev);
pp_params.dev = dmadev;
pp_params.dma_dir = DMA_BIDIRECTIONAL;
pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
pp_params.offset = TSNEP_RX_OFFSET;
rx->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx->page_pool)) {
retval = PTR_ERR(rx->page_pool);
rx->page_pool = NULL;
goto failed;
}
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
}
return 0;
failed:
tsnep_rx_ring_cleanup(rx);
return retval;
}
static void tsnep_rx_init(struct tsnep_rx *rx)
{
dma_addr_t dma;
dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
rx->write = 0;
rx->read = 0;
rx->owner_counter = 1;
rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}
static void tsnep_rx_enable(struct tsnep_rx *rx)
{
/* descriptor properties shall be valid before hardware is notified */
dma_wmb();
iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
}
static void tsnep_rx_disable(struct tsnep_rx *rx)
{
u32 val;
iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
1000000);
}
static int tsnep_rx_desc_available(struct tsnep_rx *rx)
{
if (rx->read <= rx->write)
return TSNEP_RING_SIZE - rx->write + rx->read - 1;
else
return rx->read - rx->write - 1;
}
static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
{
struct page **page;
/* last entry of page_buffer is always zero, because ring cannot be
* filled completely
*/
page = rx->page_buffer;
while (*page) {
page_pool_put_full_page(rx->page_pool, *page, false);
*page = NULL;
page++;
}
}
static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
{
int i;
/* alloc for all ring entries except the last one, because ring cannot
* be filled completely
*/
for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
if (!rx->page_buffer[i]) {
tsnep_rx_free_page_buffer(rx);
return -ENOMEM;
}
}
return 0;
}
static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
struct page *page)
{
entry->page = page;
entry->len = TSNEP_MAX_RX_BUF_SIZE;
entry->dma = page_pool_get_dma_addr(entry->page);
entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
}
static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
struct page *page;
page = page_pool_dev_alloc_pages(rx->page_pool);
if (unlikely(!page))
return -ENOMEM;
tsnep_rx_set_page(rx, entry, page);
return 0;
}
static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
struct tsnep_rx_entry *read = &rx->entry[rx->read];
tsnep_rx_set_page(rx, entry, read->page);
read->page = NULL;
}
static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
/* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiples of 4 */
entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (index == rx->increment_owner_counter) {
rx->owner_counter++;
if (rx->owner_counter == 4)
rx->owner_counter = 1;
rx->increment_owner_counter--;
if (rx->increment_owner_counter < 0)
rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
}
entry->properties |=
(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
TSNEP_DESC_OWNER_COUNTER_MASK;
/* descriptor properties shall be written last, because valid data is
* signaled there
*/
dma_wmb();
entry->desc->properties = __cpu_to_le32(entry->properties);
}
static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
{
bool alloc_failed = false;
int i, index;
for (i = 0; i < count && !alloc_failed; i++) {
index = (rx->write + i) & TSNEP_RING_MASK;
if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
rx->alloc_failed++;
alloc_failed = true;
/* reuse only if no other allocation was successful */
if (i == 0 && reuse)
tsnep_rx_reuse_buffer(rx, index);
else
break;
}
tsnep_rx_activate(rx, index);
}
if (i)
rx->write = (rx->write + i) & TSNEP_RING_MASK;
return i;
}
static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
int desc_refilled;
desc_refilled = tsnep_rx_alloc(rx, count, reuse);
if (desc_refilled)
tsnep_rx_enable(rx);
return desc_refilled;
}
static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
struct xdp_buff *xdp)
{
entry->xdp = xdp;
entry->len = TSNEP_XSK_RX_BUF_SIZE;
entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
entry->desc->rx = __cpu_to_le64(entry->dma);
}
static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
struct tsnep_rx_entry *read = &rx->entry[rx->read];
tsnep_rx_set_xdp(rx, entry, read->xdp);
read->xdp = NULL;
}
static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
{
u32 allocated;
int i;
allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
for (i = 0; i < allocated; i++) {
int index = (rx->write + i) & TSNEP_RING_MASK;
struct tsnep_rx_entry *entry = &rx->entry[index];
tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
tsnep_rx_activate(rx, index);
}
if (i == 0) {
rx->alloc_failed++;
if (reuse) {
tsnep_rx_reuse_buffer_zc(rx, rx->write);
tsnep_rx_activate(rx, rx->write);
}
}
if (i)
rx->write = (rx->write + i) & TSNEP_RING_MASK;
return i;
}
static void tsnep_rx_free_zc(struct tsnep_rx *rx)
{
int i;
for (i = 0; i < TSNEP_RING_SIZE; i++) {
struct tsnep_rx_entry *entry = &rx->entry[i];
if (entry->xdp)
xsk_buff_free(entry->xdp);
entry->xdp = NULL;
}
}
static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
{
int desc_refilled;
desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
if (desc_refilled)
tsnep_rx_enable(rx);
return desc_refilled;
}
static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
unsigned int length;
unsigned int sync;
u32 act;
length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
case XDP_REDIRECT:
if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
goto out_failure;
*status |= TSNEP_XDP_REDIRECT;
return true;
default:
bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
trace_xdp_exception(rx->adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
/* Due to xdp_adjust_tail the DMA sync for_device must cover the
 * maximum length the CPU may have touched
 */
sync = xdp->data_end - xdp->data_hard_start -
XDP_PACKET_HEADROOM;
sync = max(sync, length);
page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
sync, true);
return true;
}
}
static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
struct xdp_buff *xdp, int *status,
struct netdev_queue *tx_nq,
struct tsnep_tx *tx)
{
u32 act;
act = bpf_prog_run_xdp(prog, xdp);
/* XDP_REDIRECT is the main action for zero-copy */
if (likely(act == XDP_REDIRECT)) {
if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
goto out_failure;
*status |= TSNEP_XDP_REDIRECT;
return true;
}
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
goto out_failure;
*status |= TSNEP_XDP_TX;
return true;
default:
bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
trace_xdp_exception(rx->adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
xsk_buff_free(xdp);
return true;
}
}
static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
if (status & TSNEP_XDP_TX) {
__netif_tx_lock(tx_nq, smp_processor_id());
tsnep_xdp_xmit_flush(tx);
__netif_tx_unlock(tx_nq);
}
if (status & TSNEP_XDP_REDIRECT)
xdp_do_flush();
}
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
int length)
{
struct sk_buff *skb;
skb = napi_build_skb(page_address(page), PAGE_SIZE);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
__skb_put(skb, length - ETH_FCS_LEN);
if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
struct tsnep_rx_inline *rx_inline =
(struct tsnep_rx_inline *)(page_address(page) +
TSNEP_RX_OFFSET);
skb_shinfo(skb)->tx_flags |=
SKBTX_HW_TSTAMP_NETDEV;
memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->netdev_data = rx_inline;
}
skb_record_rx_queue(skb, rx->queue_index);
skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
return skb;
}
static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
struct page *page, int length)
{
struct sk_buff *skb;
skb = tsnep_build_skb(rx, page, length);
if (skb) {
skb_mark_for_recycle(skb);
rx->packets++;
rx->bytes += length;
if (skb->pkt_type == PACKET_MULTICAST)
rx->multicast++;
napi_gro_receive(napi, skb);
} else {
page_pool_recycle_direct(rx->page_pool, page);
rx->dropped++;
}
}
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
struct netdev_queue *tx_nq;
struct bpf_prog *prog;
struct xdp_buff xdp;
struct tsnep_tx *tx;
int desc_available;
int xdp_status = 0;
int done = 0;
int length;
desc_available = tsnep_rx_desc_available(rx);
dma_dir = page_pool_get_dma_dir(rx->page_pool);
prog = READ_ONCE(rx->adapter->xdp_prog);
if (prog) {
tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
rx->tx_queue_index);
tx = &rx->adapter->tx[rx->tx_queue_index];
xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
}
while (likely(done < budget) && (rx->read != rx->write)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_OWNER_COUNTER_MASK) !=
(entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
break;
done++;
if (desc_available >= TSNEP_RING_RX_REFILL) {
bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
desc_available -= tsnep_rx_refill(rx, desc_available,
reuse);
if (!entry->page) {
/* buffer has been reused for refill to prevent
* empty RX ring, thus buffer cannot be used for
* RX processing
*/
rx->read = (rx->read + 1) & TSNEP_RING_MASK;
desc_available++;
rx->dropped++;
continue;
}
}
/* descriptor properties shall be read first, because valid data
* is signaled there
*/
dma_rmb();
prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
dma_sync_single_range_for_cpu(dmadev, entry->dma,
TSNEP_RX_OFFSET, length, dma_dir);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
* consider metadata size as offset of actual data during RX
* processing
*/
length -= TSNEP_RX_INLINE_METADATA_SIZE;
rx->read = (rx->read + 1) & TSNEP_RING_MASK;
desc_available++;
if (prog) {
bool consume;
xdp_prepare_buff(&xdp, page_address(entry->page),
XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
length, false);
consume = tsnep_xdp_run_prog(rx, prog, &xdp,
&xdp_status, tx_nq, tx);
if (consume) {
rx->packets++;
rx->bytes += length;
entry->page = NULL;
continue;
}
}
tsnep_rx_page(rx, napi, entry->page, length);
entry->page = NULL;
}
if (xdp_status)
tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
if (desc_available)
tsnep_rx_refill(rx, desc_available, false);
return done;
}
static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct tsnep_rx_entry *entry;
struct netdev_queue *tx_nq;
struct bpf_prog *prog;
struct tsnep_tx *tx;
int desc_available;
int xdp_status = 0;
struct page *page;
int done = 0;
int length;
desc_available = tsnep_rx_desc_available(rx);
prog = READ_ONCE(rx->adapter->xdp_prog);
if (prog) {
tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
rx->tx_queue_index);
tx = &rx->adapter->tx[rx->tx_queue_index];
}
while (likely(done < budget) && (rx->read != rx->write)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_OWNER_COUNTER_MASK) !=
(entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
break;
done++;
if (desc_available >= TSNEP_RING_RX_REFILL) {
bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
desc_available -= tsnep_rx_refill_zc(rx, desc_available,
reuse);
if (!entry->xdp) {
/* buffer has been reused for refill to prevent
* empty RX ring, thus buffer cannot be used for
* RX processing
*/
rx->read = (rx->read + 1) & TSNEP_RING_MASK;
desc_available++;
rx->dropped++;
continue;
}
}
/* descriptor properties shall be read first, because valid data
* is signaled there
*/
dma_rmb();
prefetch(entry->xdp->data);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length);
xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
* consider metadata size as offset of actual data during RX
* processing
*/
length -= TSNEP_RX_INLINE_METADATA_SIZE;
rx->read = (rx->read + 1) & TSNEP_RING_MASK;
desc_available++;
if (prog) {
bool consume;
entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
&xdp_status, tx_nq, tx);
if (consume) {
rx->packets++;
rx->bytes += length;
entry->xdp = NULL;
continue;
}
}
page = page_pool_dev_alloc_pages(rx->page_pool);
if (page) {
memcpy(page_address(page) + TSNEP_RX_OFFSET,
entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
length + TSNEP_RX_INLINE_METADATA_SIZE);
tsnep_rx_page(rx, napi, page, length);
} else {
rx->dropped++;
}
xsk_buff_free(entry->xdp);
entry->xdp = NULL;
}
if (xdp_status)
tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
if (desc_available)
desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
if (xsk_uses_need_wakeup(rx->xsk_pool)) {
if (desc_available)
xsk_set_rx_need_wakeup(rx->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx->xsk_pool);
return done;
}
return desc_available ? budget : done;
}
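/* check whether the next RX descriptor has already been written back by the
 * hardware, i.e. whether RX work is pending
 */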
static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
struct tsnep_rx_entry *entry;
if (rx->read != rx->write) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_OWNER_COUNTER_MASK) ==
(entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
return true;
}
return false;
}
static int tsnep_rx_open(struct tsnep_rx *rx)
{
int desc_available;
int retval;
retval = tsnep_rx_ring_create(rx);
if (retval)
return retval;
tsnep_rx_init(rx);
desc_available = tsnep_rx_desc_available(rx);
if (rx->xsk_pool)
retval = tsnep_rx_alloc_zc(rx, desc_available, false);
else
retval = tsnep_rx_alloc(rx, desc_available, false);
if (retval != desc_available) {
retval = -ENOMEM;
goto alloc_failed;
}
/* prealloc pages to prevent allocation failures when XSK pool is
* disabled at runtime
*/
if (rx->xsk_pool) {
retval = tsnep_rx_alloc_page_buffer(rx);
if (retval)
goto alloc_failed;
}
return 0;
alloc_failed:
tsnep_rx_ring_cleanup(rx);
return retval;
}
static void tsnep_rx_close(struct tsnep_rx *rx)
{
if (rx->xsk_pool)
tsnep_rx_free_page_buffer(rx);
tsnep_rx_ring_cleanup(rx);
}
static void tsnep_rx_reopen(struct tsnep_rx *rx)
{
struct page **page = rx->page_buffer;
int i;
tsnep_rx_init(rx);
for (i = 0; i < TSNEP_RING_SIZE; i++) {
struct tsnep_rx_entry *entry = &rx->entry[i];
/* defined initial values for properties are required for
* correct owner counter checking
*/
entry->desc->properties = 0;
entry->desc_wb->properties = 0;
/* prevent allocation failures by reusing kept pages */
if (*page) {
tsnep_rx_set_page(rx, entry, *page);
tsnep_rx_activate(rx, rx->write);
rx->write++;
*page = NULL;
page++;
}
}
}
static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
{
struct page **page = rx->page_buffer;
u32 allocated;
int i;
tsnep_rx_init(rx);
/* alloc all ring entries except the last one, because the ring cannot be
 * filled completely; as many buffers as possible are enough, because a
 * wakeup is signaled if new buffers become available
 */
allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
TSNEP_RING_SIZE - 1);
for (i = 0; i < TSNEP_RING_SIZE; i++) {
struct tsnep_rx_entry *entry = &rx->entry[i];
/* keep pages to prevent allocation failures when xsk is
* disabled
*/
if (entry->page) {
*page = entry->page;
entry->page = NULL;
page++;
}
/* defined initial values for properties are required for
* correct owner counter checking
*/
entry->desc->properties = 0;
entry->desc_wb->properties = 0;
if (allocated) {
tsnep_rx_set_xdp(rx, entry,
rx->xdp_batch[allocated - 1]);
tsnep_rx_activate(rx, rx->write);
rx->write++;
allocated--;
}
}
}
static bool tsnep_pending(struct tsnep_queue *queue)
{
if (queue->tx && tsnep_tx_pending(queue->tx))
return true;
if (queue->rx && tsnep_rx_pending(queue->rx))
return true;
return false;
}
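/* NAPI poll: process TX completions first, then RX via the zero-copy or page
 * pool path depending on the XSK pool; interrupts are re-enabled only if all
 * work has been completed
 */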
static int tsnep_poll(struct napi_struct *napi, int budget)
{
struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
napi);
bool complete = true;
int done = 0;
if (queue->tx)
complete = tsnep_tx_poll(queue->tx, budget);
/* handle case where we are called by netpoll with a budget of 0 */
if (unlikely(budget <= 0))
return budget;
if (queue->rx) {
done = queue->rx->xsk_pool ?
tsnep_rx_poll_zc(queue->rx, napi, budget) :
tsnep_rx_poll(queue->rx, napi, budget);
if (done >= budget)
complete = false;
}
/* if all work not completed, return budget and keep polling */
if (!complete)
return budget;
if (likely(napi_complete_done(napi, done))) {
tsnep_enable_irq(queue->adapter, queue->irq_mask);
/* reschedule if work is already pending, prevent rotten packets
* which are transmitted or received after polling but before
* interrupt enable
*/
if (tsnep_pending(queue)) {
tsnep_disable_irq(queue->adapter, queue->irq_mask);
napi_schedule(napi);
}
}
return min(done, budget - 1);
}
static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
const char *name = netdev_name(queue->adapter->netdev);
irq_handler_t handler;
void *dev;
int retval;
if (first) {
sprintf(queue->name, "%s-mac", name);
handler = tsnep_irq;
dev = queue->adapter;
} else {
if (queue->tx && queue->rx)
sprintf(queue->name, "%s-txrx-%d", name,
queue->rx->queue_index);
else if (queue->tx)
sprintf(queue->name, "%s-tx-%d", name,
queue->tx->queue_index);
else
sprintf(queue->name, "%s-rx-%d", name,
queue->rx->queue_index);
handler = tsnep_irq_txrx;
dev = queue;
}
retval = request_irq(queue->irq, handler, 0, queue->name, dev);
if (retval) {
/* if name is empty, then interrupt won't be freed */
memset(queue->name, 0, sizeof(queue->name));
}
return retval;
}
static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
void *dev;
if (!strlen(queue->name))
return;
if (first)
dev = queue->adapter;
else
dev = queue;
free_irq(queue->irq, dev);
memset(queue->name, 0, sizeof(queue->name));
}
static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
{
struct tsnep_rx *rx = queue->rx;
tsnep_free_irq(queue, first);
if (rx) {
if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
xdp_rxq_info_unreg(&rx->xdp_rxq);
if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
}
netif_napi_del(&queue->napi);
}
static int tsnep_queue_open(struct tsnep_adapter *adapter,
struct tsnep_queue *queue, bool first)
{
struct tsnep_rx *rx = queue->rx;
struct tsnep_tx *tx = queue->tx;
int retval;
netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
if (rx) {
/* choose TX queue for XDP_TX */
if (tx)
rx->tx_queue_index = tx->queue_index;
else if (rx->queue_index < adapter->num_tx_queues)
rx->tx_queue_index = rx->queue_index;
else
rx->tx_queue_index = 0;
/* prepare both memory models to eliminate possible registration
* errors when memory model is switched between page pool and
* XSK pool during runtime
*/
retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
rx->queue_index, queue->napi.napi_id);
if (retval)
goto failed;
retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
MEM_TYPE_PAGE_POOL,
rx->page_pool);
if (retval)
goto failed;
retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
rx->queue_index, queue->napi.napi_id);
if (retval)
goto failed;
retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
MEM_TYPE_XSK_BUFF_POOL,
NULL);
if (retval)
goto failed;
if (rx->xsk_pool)
xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
}
retval = tsnep_request_irq(queue, first);
if (retval) {
netif_err(adapter, drv, adapter->netdev,
"can't get assigned irq %d.\n", queue->irq);
goto failed;
}
return 0;
failed:
tsnep_queue_close(queue, first);
return retval;
}
static void tsnep_queue_enable(struct tsnep_queue *queue)
{
napi_enable(&queue->napi);
tsnep_enable_irq(queue->adapter, queue->irq_mask);
if (queue->tx)
tsnep_tx_enable(queue->tx);
if (queue->rx)
tsnep_rx_enable(queue->rx);
}
static void tsnep_queue_disable(struct tsnep_queue *queue)
{
if (queue->tx)
tsnep_tx_disable(queue->tx, &queue->napi);
napi_disable(&queue->napi);
tsnep_disable_irq(queue->adapter, queue->irq_mask);
/* disable RX after NAPI polling has been disabled, because RX can be
* enabled during NAPI polling
*/
if (queue->rx)
tsnep_rx_disable(queue->rx);
}
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i, retval;
for (i = 0; i < adapter->num_queues; i++) {
if (adapter->queue[i].tx) {
retval = tsnep_tx_open(adapter->queue[i].tx);
if (retval)
goto failed;
}
if (adapter->queue[i].rx) {
retval = tsnep_rx_open(adapter->queue[i].rx);
if (retval)
goto failed;
}
retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
if (retval)
goto failed;
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_queues);
if (retval)
goto failed;
retval = netif_set_real_num_rx_queues(adapter->netdev,
adapter->num_rx_queues);
if (retval)
goto failed;
tsnep_enable_irq(adapter, ECM_INT_LINK);
retval = tsnep_phy_open(adapter);
if (retval)
goto phy_failed;
for (i = 0; i < adapter->num_queues; i++)
tsnep_queue_enable(&adapter->queue[i]);
return 0;
phy_failed:
tsnep_disable_irq(adapter, ECM_INT_LINK);
failed:
for (i = 0; i < adapter->num_queues; i++) {
tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
return retval;
}
static int tsnep_netdev_close(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
tsnep_disable_irq(adapter, ECM_INT_LINK);
tsnep_phy_close(adapter);
for (i = 0; i < adapter->num_queues; i++) {
tsnep_queue_disable(&adapter->queue[i]);
tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
return 0;
}
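/* attach XSK buffer pool to a TX/RX queue pair; page and XDP buffer arrays
 * are preallocated so that the pool can be detached later without allocation
 * failures, and the queue is disabled temporarily while the pool is swapped in
 */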
int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
{
bool running = netif_running(queue->adapter->netdev);
u32 frame_size;
frame_size = xsk_pool_get_rx_frame_size(pool);
if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
return -EOPNOTSUPP;
queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
sizeof(*queue->rx->page_buffer),
GFP_KERNEL);
if (!queue->rx->page_buffer)
return -ENOMEM;
queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
sizeof(*queue->rx->xdp_batch),
GFP_KERNEL);
if (!queue->rx->xdp_batch) {
kfree(queue->rx->page_buffer);
queue->rx->page_buffer = NULL;
return -ENOMEM;
}
xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
if (running)
tsnep_queue_disable(queue);
queue->tx->xsk_pool = pool;
queue->rx->xsk_pool = pool;
if (running) {
tsnep_rx_reopen_xsk(queue->rx);
tsnep_queue_enable(queue);
}
return 0;
}
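/* detach XSK buffer pool from a TX/RX queue pair and fall back to the page
 * pool based RX path using the previously kept pages
 */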
void tsnep_disable_xsk(struct tsnep_queue *queue)
{
bool running = netif_running(queue->adapter->netdev);
if (running)
tsnep_queue_disable(queue);
tsnep_rx_free_zc(queue->rx);
queue->rx->xsk_pool = NULL;
queue->tx->xsk_pool = NULL;
if (running) {
tsnep_rx_reopen(queue->rx);
tsnep_queue_enable(queue);
}
kfree(queue->rx->xdp_batch);
queue->rx->xdp_batch = NULL;
kfree(queue->rx->page_buffer);
queue->rx->page_buffer = NULL;
}
static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
u16 queue_mapping = skb_get_queue_mapping(skb);
if (queue_mapping >= adapter->num_tx_queues)
queue_mapping = 0;
return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}
static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
if (!netif_running(netdev))
return -EINVAL;
if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
return tsnep_ptp_ioctl(netdev, ifr, cmd);
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
u16 rx_filter = 0;
/* configured MAC address and broadcasts are never filtered */
if (netdev->flags & IFF_PROMISC) {
rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
}
iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}
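/* aggregate software per-queue counters and add hardware drop and error
 * counters from the per-queue RX statistic and global ECM statistic registers
 */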
static void tsnep_netdev_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
u32 reg;
u32 val;
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
stats->tx_packets += adapter->tx[i].packets;
stats->tx_bytes += adapter->tx[i].bytes;
stats->tx_dropped += adapter->tx[i].dropped;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
stats->rx_packets += adapter->rx[i].packets;
stats->rx_bytes += adapter->rx[i].bytes;
stats->rx_dropped += adapter->rx[i].dropped;
stats->multicast += adapter->rx[i].multicast;
reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
TSNEP_RX_STATISTIC);
val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
stats->rx_dropped += val;
val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
stats->rx_dropped += val;
val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
stats->rx_errors += val;
stats->rx_fifo_errors += val;
val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
stats->rx_errors += val;
stats->rx_frame_errors += val;
}
reg = ioread32(adapter->addr + ECM_STAT);
val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
stats->rx_errors += val;
val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
stats->rx_errors += val;
stats->rx_crc_errors += val;
val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
stats->rx_errors += val;
}
static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
iowrite16(*(u16 *)(addr + sizeof(u32)),
adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
ether_addr_copy(adapter->mac_address, addr);
netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
addr);
}
static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct sockaddr *sock_addr = addr;
int retval;
retval = eth_prepare_mac_addr_change(netdev, sock_addr);
if (retval)
return retval;
eth_hw_addr_set(netdev, sock_addr->sa_data);
tsnep_mac_set_address(adapter, sock_addr->sa_data);
return 0;
}
static int tsnep_netdev_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = netdev->features ^ features;
bool enable;
int retval = 0;
if (changed & NETIF_F_LOOPBACK) {
enable = !!(features & NETIF_F_LOOPBACK);
retval = tsnep_phy_loopback(adapter, enable);
}
return retval;
}
static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles)
{
struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
u64 timestamp;
if (cycles)
timestamp = __le64_to_cpu(rx_inline->counter);
else
timestamp = __le64_to_cpu(rx_inline->timestamp);
return ns_to_ktime(timestamp);
}
static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct tsnep_adapter *adapter = netdev_priv(dev);
switch (bpf->command) {
case XDP_SETUP_PROG:
return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
case XDP_SETUP_XSK_POOL:
return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
bpf->xsk.queue_id);
default:
return -EOPNOTSUPP;
}
}
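/* map the current CPU to a TX queue for XDP transmission by folding the CPU
 * number into the range of available TX queues
 */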
static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
{
if (cpu >= TSNEP_MAX_QUEUES)
cpu &= TSNEP_MAX_QUEUES - 1;
while (cpu >= adapter->num_tx_queues)
cpu -= adapter->num_tx_queues;
return &adapter->tx[cpu];
}
static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **xdp, u32 flags)
{
struct tsnep_adapter *adapter = netdev_priv(dev);
u32 cpu = smp_processor_id();
struct netdev_queue *nq;
struct tsnep_tx *tx;
int nxmit;
bool xmit;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
tx = tsnep_xdp_get_tx(adapter, cpu);
nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
__netif_tx_lock(nq, cpu);
for (nxmit = 0; nxmit < n; nxmit++) {
xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
TSNEP_TX_TYPE_XDP_NDO);
if (!xmit)
break;
/* avoid transmit queue timeout since we share it with the slow
* path
*/
txq_trans_cond_update(nq);
}
if (flags & XDP_XMIT_FLUSH)
tsnep_xdp_xmit_flush(tx);
__netif_tx_unlock(nq);
return nxmit;
}
static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
u32 flags)
{
struct tsnep_adapter *adapter = netdev_priv(dev);
struct tsnep_queue *queue;
if (queue_id >= adapter->num_rx_queues ||
queue_id >= adapter->num_tx_queues)
return -EINVAL;
queue = &adapter->queue[queue_id];
if (!napi_if_scheduled_mark_missed(&queue->napi))
napi_schedule(&queue->napi);
return 0;
}
static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
.ndo_start_xmit = tsnep_netdev_xmit_frame,
.ndo_eth_ioctl = tsnep_netdev_ioctl,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
.ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
.ndo_bpf = tsnep_netdev_bpf,
.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
.ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
int retval;
/* initialize RX filtering, at least configured MAC address and
* broadcast are not filtered
*/
iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
/* try to get MAC address in the following order:
* - device tree
* - valid MAC address already set
* - MAC address register if valid
* - random MAC address
*/
retval = of_get_mac_address(adapter->pdev->dev.of_node,
adapter->mac_address);
if (retval == -EPROBE_DEFER)
return retval;
if (retval && !is_valid_ether_addr(adapter->mac_address)) {
*(u32 *)adapter->mac_address =
ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
*(u16 *)(adapter->mac_address + sizeof(u32)) =
ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
if (!is_valid_ether_addr(adapter->mac_address))
eth_random_addr(adapter->mac_address);
}
tsnep_mac_set_address(adapter, adapter->mac_address);
eth_hw_addr_set(adapter->netdev, adapter->mac_address);
return 0;
}
static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
struct device_node *np = adapter->pdev->dev.of_node;
int retval;
if (np) {
np = of_get_child_by_name(np, "mdio");
if (!np)
return 0;
adapter->suppress_preamble =
of_property_read_bool(np, "suppress-preamble");
}
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
if (!adapter->mdiobus) {
retval = -ENOMEM;
goto out;
}
adapter->mdiobus->priv = (void *)adapter;
adapter->mdiobus->parent = &adapter->pdev->dev;
adapter->mdiobus->read = tsnep_mdiobus_read;
adapter->mdiobus->write = tsnep_mdiobus_write;
adapter->mdiobus->name = TSNEP "-mdiobus";
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
adapter->pdev->name);
/* do not scan broadcast address */
adapter->mdiobus->phy_mask = 0x0000001;
retval = of_mdiobus_register(adapter->mdiobus, np);
out:
of_node_put(np);
return retval;
}
static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
struct device_node *phy_node;
int retval;
retval = of_get_phy_mode(adapter->pdev->dev.of_node,
&adapter->phy_mode);
if (retval)
adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
0);
adapter->phydev = of_phy_find_device(phy_node);
of_node_put(phy_node);
if (!adapter->phydev && adapter->mdiobus)
adapter->phydev = phy_find_first(adapter->mdiobus);
if (!adapter->phydev)
return -EIO;
return 0;
}
static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
char name[8];
int i;
int retval;
/* one TX/RX queue pair for netdev is mandatory */
if (platform_irq_count(adapter->pdev) == 1)
retval = platform_get_irq(adapter->pdev, 0);
else
retval = platform_get_irq_byname(adapter->pdev, "mac");
if (retval < 0)
return retval;
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
adapter->num_queues = 1;
adapter->queue[0].adapter = adapter;
adapter->queue[0].irq = retval;
adapter->queue[0].tx = &adapter->tx[0];
adapter->queue[0].tx->adapter = adapter;
adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
adapter->queue[0].tx->queue_index = 0;
adapter->queue[0].rx = &adapter->rx[0];
adapter->queue[0].rx->adapter = adapter;
adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
adapter->queue[0].rx->queue_index = 0;
adapter->queue[0].irq_mask = irq_mask;
adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
retval = tsnep_set_irq_coalesce(&adapter->queue[0],
TSNEP_COALESCE_USECS_DEFAULT);
if (retval < 0)
return retval;
adapter->netdev->irq = adapter->queue[0].irq;
/* add additional TX/RX queue pairs only if dedicated interrupt is
* available
*/
for (i = 1; i < queue_count; i++) {
sprintf(name, "txrx-%d", i);
retval = platform_get_irq_byname_optional(adapter->pdev, name);
if (retval < 0)
break;
adapter->num_tx_queues++;
adapter->num_rx_queues++;
adapter->num_queues++;
adapter->queue[i].adapter = adapter;
adapter->queue[i].irq = retval;
adapter->queue[i].tx = &adapter->tx[i];
adapter->queue[i].tx->adapter = adapter;
adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
adapter->queue[i].tx->queue_index = i;
adapter->queue[i].rx = &adapter->rx[i];
adapter->queue[i].rx->adapter = adapter;
adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
adapter->queue[i].rx->queue_index = i;
adapter->queue[i].irq_mask =
irq_mask << (ECM_INT_TXRX_SHIFT * i);
adapter->queue[i].irq_delay_addr =
adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
retval = tsnep_set_irq_coalesce(&adapter->queue[i],
TSNEP_COALESCE_USECS_DEFAULT);
if (retval < 0)
return retval;
}
return 0;
}
static int tsnep_probe(struct platform_device *pdev)
{
struct tsnep_adapter *adapter;
struct net_device *netdev;
struct resource *io;
u32 type;
int revision;
int version;
int queue_count;
int retval;
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
sizeof(struct tsnep_adapter),
TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
if (!netdev)
return -ENODEV;
SET_NETDEV_DEV(netdev, &pdev->dev);
adapter = netdev_priv(netdev);
platform_set_drvdata(pdev, adapter);
adapter->pdev = pdev;
adapter->dmadev = &pdev->dev;
adapter->netdev = netdev;
adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
mutex_init(&adapter->gate_control_lock);
mutex_init(&adapter->rxnfc_lock);
INIT_LIST_HEAD(&adapter->rxnfc_rules);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adapter->addr = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
netdev->mem_start = io->start;
netdev->mem_end = io->end;
type = ioread32(adapter->addr + ECM_TYPE);
revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
adapter->gate_control = type & ECM_GATE_CONTROL;
adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
tsnep_disable_irq(adapter, ECM_INT_ALL);
retval = tsnep_queue_init(adapter, queue_count);
if (retval)
return retval;
retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
DMA_BIT_MASK(64));
if (retval) {
dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
return retval;
}
retval = tsnep_mac_init(adapter);
if (retval)
return retval;
retval = tsnep_mdio_init(adapter);
if (retval)
goto mdio_init_failed;
retval = tsnep_phy_init(adapter);
if (retval)
goto phy_init_failed;
retval = tsnep_ptp_init(adapter);
if (retval)
goto ptp_init_failed;
retval = tsnep_tc_init(adapter);
if (retval)
goto tc_init_failed;
retval = tsnep_rxnfc_init(adapter);
if (retval)
goto rxnfc_init_failed;
netdev->netdev_ops = &tsnep_netdev_ops;
netdev->ethtool_ops = &tsnep_ethtool_ops;
netdev->features = NETIF_F_SG;
netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG |
NETDEV_XDP_ACT_XSK_ZEROCOPY;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
retval = register_netdev(netdev);
if (retval)
goto register_failed;
dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
revision);
if (adapter->gate_control)
dev_info(&adapter->pdev->dev, "gate control detected\n");
return 0;
register_failed:
tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
tsnep_tc_cleanup(adapter);
tc_init_failed:
tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
return retval;
}
static int tsnep_remove(struct platform_device *pdev)
{
struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
unregister_netdev(adapter->netdev);
tsnep_rxnfc_cleanup(adapter);
tsnep_tc_cleanup(adapter);
tsnep_ptp_cleanup(adapter);
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
tsnep_disable_irq(adapter, ECM_INT_ALL);
return 0;
}
static const struct of_device_id tsnep_of_match[] = {
{ .compatible = "engleder,tsnep", },
{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);
static struct platform_driver tsnep_driver = {
.driver = {
.name = TSNEP,
.of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);
MODULE_AUTHOR("Gerhard Engleder <[email protected]>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_main.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Gerhard Engleder <[email protected]> */
#include "tsnep.h"
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static void tsnep_enable_rule(struct tsnep_adapter *adapter,
struct tsnep_rxnfc_rule *rule)
{
u8 rx_assign;
void __iomem *addr;
rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
TSNEP_RX_ASSIGN_QUEUE_MASK;
addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
iowrite16(rule->filter.ether_type, addr);
/* enable rule after all settings are done */
addr = adapter->addr + TSNEP_RX_ASSIGN +
TSNEP_RX_ASSIGN_OFFSET * rule->location;
iowrite8(rx_assign, addr);
}
static void tsnep_disable_rule(struct tsnep_adapter *adapter,
struct tsnep_rxnfc_rule *rule)
{
void __iomem *addr;
addr = adapter->addr + TSNEP_RX_ASSIGN +
TSNEP_RX_ASSIGN_OFFSET * rule->location;
iowrite8(0, addr);
}
static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
int location)
{
struct tsnep_rxnfc_rule *rule;
list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
if (rule->location == location)
return rule;
if (rule->location > location)
break;
}
return NULL;
}
static void tsnep_add_rule(struct tsnep_adapter *adapter,
struct tsnep_rxnfc_rule *rule)
{
struct tsnep_rxnfc_rule *pred, *cur;
tsnep_enable_rule(adapter, rule);
pred = NULL;
list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
if (cur->location >= rule->location)
break;
pred = cur;
}
list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
adapter->rxnfc_count++;
}
static void tsnep_delete_rule(struct tsnep_adapter *adapter,
struct tsnep_rxnfc_rule *rule)
{
tsnep_disable_rule(adapter, rule);
list_del(&rule->list);
adapter->rxnfc_count--;
kfree(rule);
}
static void tsnep_flush_rules(struct tsnep_adapter *adapter)
{
struct tsnep_rxnfc_rule *rule, *tmp;
mutex_lock(&adapter->rxnfc_lock);
list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
tsnep_delete_rule(adapter, rule);
mutex_unlock(&adapter->rxnfc_lock);
}
int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct tsnep_rxnfc_rule *rule = NULL;
cmd->data = adapter->rxnfc_max;
mutex_lock(&adapter->rxnfc_lock);
rule = tsnep_get_rule(adapter, fsp->location);
if (!rule) {
mutex_unlock(&adapter->rxnfc_lock);
return -ENOENT;
}
fsp->flow_type = ETHER_FLOW;
fsp->ring_cookie = rule->queue_index;
if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
}
mutex_unlock(&adapter->rxnfc_lock);
return 0;
}
int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct tsnep_rxnfc_rule *rule;
int count = 0;
cmd->data = adapter->rxnfc_max;
mutex_lock(&adapter->rxnfc_lock);
list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
if (count == cmd->rule_cnt) {
mutex_unlock(&adapter->rxnfc_lock);
return -EMSGSIZE;
}
rule_locs[count] = rule->location;
count++;
}
mutex_unlock(&adapter->rxnfc_lock);
cmd->rule_cnt = count;
return 0;
}
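/* find the first unused rule location; relies on the rule list being sorted
 * by location, returns -ENOSPC if all locations are in use
 */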
static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
{
struct tsnep_rxnfc_rule *tmp;
int location = 0;
list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
if (tmp->location == location)
location++;
else
return location;
}
if (location >= adapter->rxnfc_max)
return -ENOSPC;
return location;
}
static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
const struct ethtool_rx_flow_spec *fsp)
{
INIT_LIST_HEAD(&rule->list);
rule->queue_index = fsp->ring_cookie;
rule->location = fsp->location;
rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
}
static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
struct tsnep_rxnfc_rule *rule)
{
struct net_device *dev = adapter->netdev;
struct tsnep_rxnfc_rule *tmp;
list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
tmp->location != rule->location) {
netdev_dbg(dev, "rule already exists\n");
return -EEXIST;
}
}
return 0;
}
int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct tsnep_rxnfc_rule *rule, *old_rule;
int retval;
/* only EtherType is supported */
if (fsp->flow_type != ETHER_FLOW ||
!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
!is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
netdev_dbg(netdev, "only ethernet protocol is supported\n");
return -EOPNOTSUPP;
}
if (fsp->ring_cookie >
(TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
netdev_dbg(netdev, "invalid action\n");
return -EINVAL;
}
if (fsp->location != RX_CLS_LOC_ANY &&
fsp->location >= adapter->rxnfc_max) {
netdev_dbg(netdev, "invalid location\n");
return -EINVAL;
}
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
return -ENOMEM;
mutex_lock(&adapter->rxnfc_lock);
if (fsp->location == RX_CLS_LOC_ANY) {
retval = tsnep_rxnfc_find_location(adapter);
if (retval < 0)
goto failed;
fsp->location = retval;
}
tsnep_rxnfc_init_rule(rule, fsp);
retval = tsnep_rxnfc_check_rule(adapter, rule);
if (retval)
goto failed;
old_rule = tsnep_get_rule(adapter, fsp->location);
if (old_rule)
tsnep_delete_rule(adapter, old_rule);
tsnep_add_rule(adapter, rule);
mutex_unlock(&adapter->rxnfc_lock);
return 0;
failed:
mutex_unlock(&adapter->rxnfc_lock);
kfree(rule);
return retval;
}
int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct tsnep_rxnfc_rule *rule;
mutex_lock(&adapter->rxnfc_lock);
rule = tsnep_get_rule(adapter, fsp->location);
if (!rule) {
mutex_unlock(&adapter->rxnfc_lock);
return -ENOENT;
}
tsnep_delete_rule(adapter, rule);
mutex_unlock(&adapter->rxnfc_lock);
return 0;
}
int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
{
int i;
/* disable all rules */
for (i = 0; i < adapter->rxnfc_max;
i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
return 0;
}
void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
{
tsnep_flush_rules(adapter);
}
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_rxnfc.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022 Gerhard Engleder <[email protected]> */
#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>
#include "tsnep.h"
int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct bpf_prog *old_prog;
old_prog = xchg(&adapter->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
return 0;
}
static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
struct xsk_buff_pool *pool, u16 queue_id)
{
struct tsnep_queue *queue;
int retval;
if (queue_id >= adapter->num_rx_queues ||
queue_id >= adapter->num_tx_queues)
return -EINVAL;
queue = &adapter->queue[queue_id];
if (queue->rx->queue_index != queue_id ||
queue->tx->queue_index != queue_id) {
netdev_err(adapter->netdev,
"XSK support only for TX/RX queue pairs\n");
return -EOPNOTSUPP;
}
retval = xsk_pool_dma_map(pool, adapter->dmadev,
DMA_ATTR_SKIP_CPU_SYNC);
if (retval) {
netdev_err(adapter->netdev, "failed to map XSK pool\n");
return retval;
}
retval = tsnep_enable_xsk(queue, pool);
if (retval) {
xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
return retval;
}
return 0;
}
static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
{
struct xsk_buff_pool *pool;
struct tsnep_queue *queue;
if (queue_id >= adapter->num_rx_queues ||
queue_id >= adapter->num_tx_queues)
return -EINVAL;
pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
if (!pool)
return -EINVAL;
queue = &adapter->queue[queue_id];
tsnep_disable_xsk(queue);
xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
return 0;
}
int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
struct xsk_buff_pool *pool, u16 queue_id)
{
return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
tsnep_xdp_disable_pool(adapter, queue_id);
}
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_xdp.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
#include "tsnep.h"
#include <net/pkt_sched.h>
enum tsnep_test {
TSNEP_TEST_ENABLE = 0,
TSNEP_TEST_TAPRIO,
TSNEP_TEST_TAPRIO_CHANGE,
TSNEP_TEST_TAPRIO_EXTENSION,
};
static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
"Enable timeout (offline)",
"TAPRIO (offline)",
"TAPRIO change (offline)",
"TAPRIO extension (offline)",
};
#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
static bool enable_gc_timeout(struct tsnep_adapter *adapter)
{
iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
return false;
return true;
}
static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
{
if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
return true;
return false;
}
static bool ack_gc_timeout(struct tsnep_adapter *adapter)
{
iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
if (ioread32(adapter->addr + TSNEP_GC) &
(TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
return false;
return true;
}
static bool enable_gc(struct tsnep_adapter *adapter, bool a)
{
u8 enable;
u8 active;
if (a) {
enable = TSNEP_GC_ENABLE_A;
active = TSNEP_GC_ACTIVE_A;
} else {
enable = TSNEP_GC_ENABLE_B;
active = TSNEP_GC_ACTIVE_B;
}
iowrite8(enable, adapter->addr + TSNEP_GC);
if (!(ioread32(adapter->addr + TSNEP_GC) & active))
return false;
return true;
}
static bool disable_gc(struct tsnep_adapter *adapter)
{
iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
if (ioread32(adapter->addr + TSNEP_GC) &
(TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
return false;
return true;
}
static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
{
u64 before, after;
u32 time;
bool enabled;
if (!disable_gc(adapter))
return false;
before = ktime_get_ns();
if (!enable_gc_timeout(adapter))
return false;
/* the start time is set after the timeout, so the timeout can guarantee
 * that enable is blocked if it happens too late
 */
time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
time += TSNEP_GC_TIMEOUT;
iowrite32(time, adapter->addr + TSNEP_GC_TIME);
ndelay(delay);
enabled = enable_gc(adapter, a);
after = ktime_get_ns();
if (delay > TSNEP_GC_TIMEOUT) {
/* timeout must have blocked enable */
if (enabled)
return false;
} else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
/* timeout must not have blocked enable */
if (!enabled)
return false;
}
if (enabled) {
if (gc_timeout_signaled(adapter))
return false;
} else {
if (!gc_timeout_signaled(adapter))
return false;
if (!ack_gc_timeout(adapter))
return false;
}
if (!disable_gc(adapter))
return false;
return true;
}
static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
{
int i;
iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
for (i = 0; i < 200000; i += 100) {
if (!gc_delayed_enable(adapter, true, i))
return false;
}
iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
for (i = 0; i < 200000; i += 100) {
if (!gc_delayed_enable(adapter, false, i))
return false;
}
return true;
}
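/* move the base time at least the given number of milliseconds into the
 * future while keeping it aligned to the cycle time
 */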
static void delay_base_time(struct tsnep_adapter *adapter,
struct tc_taprio_qopt_offload *qopt, s64 ms)
{
u64 system_time;
u64 base_time = ktime_to_ns(qopt->base_time);
u64 n;
tsnep_get_system_time(adapter, &system_time);
system_time += ms * 1000000;
n = div64_u64(system_time - base_time, qopt->cycle_time);
qopt->base_time = ktime_add_ns(qopt->base_time,
(n + 1) * qopt->cycle_time);
}
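/* read gate control state, gate control time and 64 bit system time
 * consistently by re-reading until high system time and gate control time are
 * stable
 */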
static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
u64 *system_time)
{
u32 time_high_before;
u32 time_low;
u32 time_high;
u32 gc_time_before;
time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
*gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
do {
time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
*gc = ioread32(adapter->addr + TSNEP_GC);
gc_time_before = *gc_time;
*gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
time_high_before = time_high;
time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
} while ((time_high != time_high_before) ||
(*gc_time != gc_time_before));
*system_time = (((u64)time_high) << 32) | ((u64)time_low);
}
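/* determine which gate control list operation is active at the given system
 * time and return the end time of that operation via next
 */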
static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
{
u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
int i;
*next = cycle_start;
for (i = 0; i < gcl->count; i++) {
*next += gcl->operation[i].interval;
if (*next > system_time)
break;
}
return i;
}
static bool check_gate(struct tsnep_adapter *adapter)
{
u32 gc_time;
u32 gc;
u64 system_time;
struct tsnep_gcl *curr;
struct tsnep_gcl *prev;
u64 next_time;
u8 gate_open;
u8 next_gate_open;
get_gate_state(adapter, &gc, &gc_time, &system_time);
if (gc & TSNEP_GC_ACTIVE_A) {
curr = &adapter->gcl[0];
prev = &adapter->gcl[1];
} else if (gc & TSNEP_GC_ACTIVE_B) {
curr = &adapter->gcl[1];
prev = &adapter->gcl[0];
} else {
return false;
}
if (curr->start_time <= system_time) {
/* GCL is already active */
int index;
index = get_operation(curr, system_time, &next_time);
gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
if (index == curr->count - 1)
index = 0;
else
index++;
next_gate_open =
curr->operation[index].properties & TSNEP_GCL_MASK;
} else if (curr->change) {
/* operation of previous GCL is active */
int index;
u64 start_before;
u64 n;
index = get_operation(prev, system_time, &next_time);
next_time = curr->start_time;
start_before = prev->base_time;
n = div64_u64(curr->start_time - start_before,
prev->cycle_time);
start_before += n * prev->cycle_time;
if (curr->start_time == start_before)
start_before -= prev->cycle_time;
if (((start_before + prev->cycle_time_extension) >=
curr->start_time) &&
(curr->start_time - prev->cycle_time_extension <=
system_time)) {
/* extend */
index = prev->count - 1;
}
gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
next_gate_open =
curr->operation[0].properties & TSNEP_GCL_MASK;
} else {
/* GCL is waiting for start */
next_time = curr->start_time;
gate_open = 0xFF;
next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
}
if (gc_time != (next_time & 0xFFFFFFFF)) {
dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
gc_time, next_time);
return false;
}
if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
dev_err(&adapter->pdev->dev,
"gate control open 0x%02x!=0x%02x\n",
((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
gate_open);
return false;
}
if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
next_gate_open) {
dev_err(&adapter->pdev->dev,
"gate control next open 0x%02x!=0x%02x\n",
((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
next_gate_open);
return false;
}
return true;
}
static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
{
ktime_t start = ktime_get();
do {
if (!check_gate(adapter))
return false;
} while (ktime_ms_delta(ktime_get(), start) < ms);
return true;
}
static bool enable_check_taprio(struct tsnep_adapter *adapter,
struct tc_taprio_qopt_offload *qopt, s64 ms)
{
int retval;
retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
if (retval)
return false;
if (!check_gate_duration(adapter, ms))
return false;
return true;
}
static bool disable_taprio(struct tsnep_adapter *adapter)
{
struct tc_taprio_qopt_offload qopt;
int retval;
memset(&qopt, 0, sizeof(qopt));
qopt.cmd = TAPRIO_CMD_DESTROY;
retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
if (retval)
return false;
return true;
}
static bool run_taprio(struct tsnep_adapter *adapter,
struct tc_taprio_qopt_offload *qopt, s64 ms)
{
if (!enable_check_taprio(adapter, qopt, ms))
return false;
if (!disable_taprio(adapter))
return false;
return true;
}
static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
{
struct tc_taprio_qopt_offload *qopt;
int i;
qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
qopt->cmd = TAPRIO_CMD_REPLACE;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1500000;
qopt->cycle_time_extension = 0;
qopt->entries[0].gate_mask = 0x02;
qopt->entries[0].interval = 200000;
qopt->entries[1].gate_mask = 0x03;
qopt->entries[1].interval = 800000;
qopt->entries[2].gate_mask = 0x07;
qopt->entries[2].interval = 240000;
qopt->entries[3].gate_mask = 0x01;
qopt->entries[3].interval = 80000;
qopt->entries[4].gate_mask = 0x04;
qopt->entries[4].interval = 70000;
qopt->entries[5].gate_mask = 0x06;
qopt->entries[5].interval = 60000;
qopt->entries[6].gate_mask = 0x0F;
qopt->entries[6].interval = 50000;
qopt->num_entries = 7;
if (!run_taprio(adapter, qopt, 100))
goto failed;
qopt->cmd = TAPRIO_CMD_REPLACE;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 411854;
qopt->cycle_time_extension = 0;
qopt->entries[0].gate_mask = 0x17;
qopt->entries[0].interval = 23842;
qopt->entries[1].gate_mask = 0x16;
qopt->entries[1].interval = 13482;
qopt->entries[2].gate_mask = 0x15;
qopt->entries[2].interval = 49428;
qopt->entries[3].gate_mask = 0x14;
qopt->entries[3].interval = 38189;
qopt->entries[4].gate_mask = 0x13;
qopt->entries[4].interval = 92321;
qopt->entries[5].gate_mask = 0x12;
qopt->entries[5].interval = 71239;
qopt->entries[6].gate_mask = 0x11;
qopt->entries[6].interval = 69932;
qopt->entries[7].gate_mask = 0x10;
qopt->entries[7].interval = 53421;
qopt->num_entries = 8;
if (!run_taprio(adapter, qopt, 100))
goto failed;
qopt->cmd = TAPRIO_CMD_REPLACE;
qopt->base_time = ktime_set(0, 0);
delay_base_time(adapter, qopt, 12);
qopt->cycle_time = 125000;
qopt->cycle_time_extension = 0;
qopt->entries[0].gate_mask = 0x27;
qopt->entries[0].interval = 15000;
qopt->entries[1].gate_mask = 0x26;
qopt->entries[1].interval = 15000;
qopt->entries[2].gate_mask = 0x25;
qopt->entries[2].interval = 12500;
qopt->entries[3].gate_mask = 0x24;
qopt->entries[3].interval = 17500;
qopt->entries[4].gate_mask = 0x23;
qopt->entries[4].interval = 10000;
qopt->entries[5].gate_mask = 0x22;
qopt->entries[5].interval = 11000;
qopt->entries[6].gate_mask = 0x21;
qopt->entries[6].interval = 9000;
qopt->entries[7].gate_mask = 0x20;
qopt->entries[7].interval = 10000;
qopt->entries[8].gate_mask = 0x20;
qopt->entries[8].interval = 12500;
qopt->entries[9].gate_mask = 0x20;
qopt->entries[9].interval = 12500;
qopt->num_entries = 10;
if (!run_taprio(adapter, qopt, 100))
goto failed;
kfree(qopt);
return true;
failed:
disable_taprio(adapter);
kfree(qopt);
return false;
}
static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
{
struct tc_taprio_qopt_offload *qopt;
int i;
qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
qopt->cmd = TAPRIO_CMD_REPLACE;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 100000;
qopt->cycle_time_extension = 0;
qopt->entries[0].gate_mask = 0x30;
qopt->entries[0].interval = 20000;
qopt->entries[1].gate_mask = 0x31;
qopt->entries[1].interval = 80000;
qopt->num_entries = 2;
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to identical */
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
delay_base_time(adapter, qopt, 17);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to same cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->entries[0].gate_mask = 0x42;
qopt->entries[1].gate_mask = 0x43;
delay_base_time(adapter, qopt, 2);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->entries[0].gate_mask = 0x54;
qopt->entries[0].interval = 33333;
qopt->entries[1].gate_mask = 0x55;
qopt->entries[1].interval = 66667;
delay_base_time(adapter, qopt, 23);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->entries[0].gate_mask = 0x66;
qopt->entries[0].interval = 50000;
qopt->entries[1].gate_mask = 0x67;
qopt->entries[1].interval = 25000;
qopt->entries[2].gate_mask = 0x68;
qopt->entries[2].interval = 25000;
qopt->num_entries = 3;
delay_base_time(adapter, qopt, 11);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to multiple of cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 200000;
qopt->entries[0].gate_mask = 0x79;
qopt->entries[0].interval = 50000;
qopt->entries[1].gate_mask = 0x7A;
qopt->entries[1].interval = 150000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 11);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1000000;
qopt->entries[0].gate_mask = 0x7B;
qopt->entries[0].interval = 125000;
qopt->entries[1].gate_mask = 0x7C;
qopt->entries[1].interval = 250000;
qopt->entries[2].gate_mask = 0x7D;
qopt->entries[2].interval = 375000;
qopt->entries[3].gate_mask = 0x7E;
qopt->entries[3].interval = 250000;
qopt->num_entries = 4;
delay_base_time(adapter, qopt, 3);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to shorter cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 333333;
qopt->entries[0].gate_mask = 0x8F;
qopt->entries[0].interval = 166666;
qopt->entries[1].gate_mask = 0x80;
qopt->entries[1].interval = 166667;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 11);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 62500;
qopt->entries[0].gate_mask = 0x81;
qopt->entries[0].interval = 31250;
qopt->entries[1].gate_mask = 0x82;
qopt->entries[1].interval = 15625;
qopt->entries[2].gate_mask = 0x83;
qopt->entries[2].interval = 15625;
qopt->num_entries = 3;
delay_base_time(adapter, qopt, 1);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to longer cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 400000;
qopt->entries[0].gate_mask = 0x84;
qopt->entries[0].interval = 100000;
qopt->entries[1].gate_mask = 0x85;
qopt->entries[1].interval = 100000;
qopt->entries[2].gate_mask = 0x86;
qopt->entries[2].interval = 100000;
qopt->entries[3].gate_mask = 0x87;
qopt->entries[3].interval = 100000;
qopt->num_entries = 4;
delay_base_time(adapter, qopt, 7);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1700000;
qopt->entries[0].gate_mask = 0x88;
qopt->entries[0].interval = 200000;
qopt->entries[1].gate_mask = 0x89;
qopt->entries[1].interval = 300000;
qopt->entries[2].gate_mask = 0x8A;
qopt->entries[2].interval = 600000;
qopt->entries[3].gate_mask = 0x8B;
qopt->entries[3].interval = 100000;
qopt->entries[4].gate_mask = 0x8C;
qopt->entries[4].interval = 500000;
qopt->num_entries = 5;
delay_base_time(adapter, qopt, 6);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
if (!disable_taprio(adapter))
goto failed;
kfree(qopt);
return true;
failed:
disable_taprio(adapter);
kfree(qopt);
return false;
}
static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
{
struct tc_taprio_qopt_offload *qopt;
int i;
qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
if (!qopt)
return false;
for (i = 0; i < 255; i++)
qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
qopt->cmd = TAPRIO_CMD_REPLACE;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 100000;
qopt->cycle_time_extension = 50000;
qopt->entries[0].gate_mask = 0x90;
qopt->entries[0].interval = 20000;
qopt->entries[1].gate_mask = 0x91;
qopt->entries[1].interval = 80000;
qopt->num_entries = 2;
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to different phase */
qopt->base_time = ktime_set(0, 50000);
qopt->entries[0].gate_mask = 0x92;
qopt->entries[0].interval = 33000;
qopt->entries[1].gate_mask = 0x93;
qopt->entries[1].interval = 67000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 2);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to different phase and longer cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1000000;
qopt->cycle_time_extension = 700000;
qopt->entries[0].gate_mask = 0x94;
qopt->entries[0].interval = 400000;
qopt->entries[1].gate_mask = 0x95;
qopt->entries[1].interval = 600000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 7);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 700000);
qopt->cycle_time = 2000000;
qopt->cycle_time_extension = 1900000;
qopt->entries[0].gate_mask = 0x96;
qopt->entries[0].interval = 400000;
qopt->entries[1].gate_mask = 0x97;
qopt->entries[1].interval = 1600000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 9);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to different phase and shorter cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1500000;
qopt->cycle_time_extension = 700000;
qopt->entries[0].gate_mask = 0x98;
qopt->entries[0].interval = 400000;
qopt->entries[1].gate_mask = 0x99;
qopt->entries[1].interval = 600000;
qopt->entries[2].gate_mask = 0x9A;
qopt->entries[2].interval = 500000;
qopt->num_entries = 3;
delay_base_time(adapter, qopt, 3);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 100000);
qopt->cycle_time = 500000;
qopt->cycle_time_extension = 300000;
qopt->entries[0].gate_mask = 0x9B;
qopt->entries[0].interval = 150000;
qopt->entries[1].gate_mask = 0x9C;
qopt->entries[1].interval = 350000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 9);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
/* change to different cycle time */
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 1000000;
qopt->cycle_time_extension = 700000;
qopt->entries[0].gate_mask = 0xAD;
qopt->entries[0].interval = 400000;
qopt->entries[1].gate_mask = 0xAE;
qopt->entries[1].interval = 300000;
qopt->entries[2].gate_mask = 0xAF;
qopt->entries[2].interval = 300000;
qopt->num_entries = 3;
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 400000;
qopt->cycle_time_extension = 100000;
qopt->entries[0].gate_mask = 0xA0;
qopt->entries[0].interval = 200000;
qopt->entries[1].gate_mask = 0xA1;
qopt->entries[1].interval = 200000;
qopt->num_entries = 2;
delay_base_time(adapter, qopt, 19);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 500000;
qopt->cycle_time_extension = 499999;
qopt->entries[0].gate_mask = 0xB2;
qopt->entries[0].interval = 100000;
qopt->entries[1].gate_mask = 0xB3;
qopt->entries[1].interval = 100000;
qopt->entries[2].gate_mask = 0xB4;
qopt->entries[2].interval = 100000;
qopt->entries[3].gate_mask = 0xB5;
qopt->entries[3].interval = 200000;
qopt->num_entries = 4;
delay_base_time(adapter, qopt, 19);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
qopt->base_time = ktime_set(0, 0);
qopt->cycle_time = 6000000;
qopt->cycle_time_extension = 5999999;
qopt->entries[0].gate_mask = 0xC6;
qopt->entries[0].interval = 1000000;
qopt->entries[1].gate_mask = 0xC7;
qopt->entries[1].interval = 1000000;
qopt->entries[2].gate_mask = 0xC8;
qopt->entries[2].interval = 1000000;
qopt->entries[3].gate_mask = 0xC9;
qopt->entries[3].interval = 1500000;
qopt->entries[4].gate_mask = 0xCA;
qopt->entries[4].interval = 1500000;
qopt->num_entries = 5;
delay_base_time(adapter, qopt, 1);
if (!enable_check_taprio(adapter, qopt, 100))
goto failed;
if (!disable_taprio(adapter))
goto failed;
kfree(qopt);
return true;
failed:
disable_taprio(adapter);
kfree(qopt);
return false;
}
int tsnep_ethtool_get_test_count(void)
{
return TSNEP_TEST_COUNT;
}
void tsnep_ethtool_get_test_strings(u8 *data)
{
memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
}
void tsnep_ethtool_self_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
eth_test->len = TSNEP_TEST_COUNT;
if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
/* no tests are done online */
data[TSNEP_TEST_ENABLE] = 0;
data[TSNEP_TEST_TAPRIO] = 0;
data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
return;
}
if (tsnep_test_gc_enable(adapter)) {
data[TSNEP_TEST_ENABLE] = 0;
} else {
eth_test->flags |= ETH_TEST_FL_FAILED;
data[TSNEP_TEST_ENABLE] = 1;
}
if (tsnep_test_taprio(adapter)) {
data[TSNEP_TEST_TAPRIO] = 0;
} else {
eth_test->flags |= ETH_TEST_FL_FAILED;
data[TSNEP_TEST_TAPRIO] = 1;
}
if (tsnep_test_taprio_change(adapter)) {
data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
} else {
eth_test->flags |= ETH_TEST_FL_FAILED;
data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
}
if (tsnep_test_taprio_extension(adapter)) {
data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
} else {
eth_test->flags |= ETH_TEST_FL_FAILED;
data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
}
}
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_selftests.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
#include "tsnep.h"
#include <net/pkt_sched.h>
/* save one operation at the end for additional operation at list change */
#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
{
int i;
u64 cycle_time;
if (!qopt->cycle_time)
return -ERANGE;
if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
return -EINVAL;
cycle_time = 0;
for (i = 0; i < qopt->num_entries; i++) {
if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
return -EINVAL;
if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
return -EINVAL;
if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
return -EINVAL;
cycle_time += qopt->entries[i].interval;
}
if (qopt->cycle_time != cycle_time)
return -EINVAL;
if (qopt->cycle_time_extension >= qopt->cycle_time)
return -EINVAL;
return 0;
}
static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
u32 properties, u32 interval, bool flush)
{
void __iomem *addr = gcl->addr +
sizeof(struct tsnep_gcl_operation) * index;
gcl->operation[index].properties = properties;
gcl->operation[index].interval = interval;
iowrite32(properties, addr);
iowrite32(interval, addr + sizeof(u32));
if (flush) {
/* flush write with read access */
ioread32(addr);
}
}
static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
{
u64 duration;
int count;
/* change needs to be triggered one or two operations before start of
* new gate control list
* - change is triggered at start of operation (minimum one operation)
* - operation with adjusted interval is inserted on demand to exactly
* meet the start of the new gate control list (optional)
*
* additionally properties are read directly after start of previous
* operation
*
* therefore, three operations need to be considered for the limit
*/
duration = 0;
count = 3;
while (count) {
duration += gcl->operation[index].interval;
index--;
if (index < 0)
index = gcl->count - 1;
count--;
}
return duration;
}
static void tsnep_write_gcl(struct tsnep_gcl *gcl,
struct tc_taprio_qopt_offload *qopt)
{
int i;
u32 properties;
u64 extend;
u64 cut;
gcl->base_time = ktime_to_ns(qopt->base_time);
gcl->cycle_time = qopt->cycle_time;
gcl->cycle_time_extension = qopt->cycle_time_extension;
for (i = 0; i < qopt->num_entries; i++) {
properties = qopt->entries[i].gate_mask;
if (i == (qopt->num_entries - 1))
properties |= TSNEP_GCL_LAST;
tsnep_write_gcl_operation(gcl, i, properties,
qopt->entries[i].interval, true);
}
gcl->count = qopt->num_entries;
/* calculate change limit; i.e., the time needed between enable and
* start of new gate control list
*/
/* case 1: extend cycle time for change
* - change duration of last operation
* - cycle time extension
*/
extend = tsnep_change_duration(gcl, gcl->count - 1);
extend += gcl->cycle_time_extension;
/* case 2: cut cycle time for change
* - maximum change duration
*/
cut = 0;
for (i = 0; i < gcl->count; i++)
cut = max(cut, tsnep_change_duration(gcl, i));
/* use maximum, because the actual case (extend or cut) can be
* determined only after limit is known (chicken-and-egg problem)
*/
gcl->change_limit = max(extend, cut);
}
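/* Worked example for the change limit computed in tsnep_write_gcl() above
 * (illustration only): for a hypothetical 3-entry list with intervals
 * 100us, 200us and 300us and a cycle_time_extension of 50us,
 *   extend = tsnep_change_duration(gcl, 2) + 50us = 600us + 50us = 650us
 *   cut    = max of tsnep_change_duration() over all indexes = 600us
 * so change_limit = max(650us, 600us) = 650us.
 */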
static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
{
u64 start = gcl->base_time;
u64 n;
if (start <= limit) {
n = div64_u64(limit - start, gcl->cycle_time);
start += (n + 1) * gcl->cycle_time;
}
return start;
}
static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
{
u64 start = gcl->base_time;
u64 n;
n = div64_u64(limit - start, gcl->cycle_time);
start += n * gcl->cycle_time;
if (start == limit)
start -= gcl->cycle_time;
return start;
}
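/* Worked example for the two helpers above (illustration only): with
 * base_time = 1000 and cycle_time = 300, a limit of 1650 yields
 *   tsnep_gcl_start_after():  n = (1650 - 1000) / 300 = 2,
 *                             start = 1000 + 3 * 300 = 1900
 *   tsnep_gcl_start_before(): start = 1000 + 2 * 300 = 1600
 * i.e. the first cycle start strictly after the limit and the last cycle
 * start strictly before it.
 */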
static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
bool insert)
{
/* previous operation triggers change and properties are evaluated at
* start of operation
*/
if (index == 0)
index = gcl->count - 1;
else
index = index - 1;
change -= gcl->operation[index].interval;
/* optionally change to new list with additional operation in between */
if (insert) {
void __iomem *addr = gcl->addr +
sizeof(struct tsnep_gcl_operation) * index;
gcl->operation[index].properties |= TSNEP_GCL_INSERT;
iowrite32(gcl->operation[index].properties, addr);
}
return change;
}
static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
{
int i;
u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
void __iomem *addr;
/* search for insert operation and reset properties */
for (i = 0; i < gcl->count; i++) {
if (gcl->operation[i].properties & ~mask) {
addr = gcl->addr +
sizeof(struct tsnep_gcl_operation) * i;
gcl->operation[i].properties &= mask;
iowrite32(gcl->operation[i].properties, addr);
break;
}
}
}
static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
u64 change, u32 interval)
{
u32 properties;
properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
/* change to new list directly after inserted operation */
properties |= TSNEP_GCL_CHANGE;
/* the last operation of the list is reserved for the insert operation */
tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
interval, false);
return tsnep_set_gcl_change(gcl, ref, change, true);
}
static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
{
int ref = gcl->count - 1;
u32 interval = gcl->operation[ref].interval + extension;
start -= gcl->operation[ref].interval;
return tsnep_insert_gcl_operation(gcl, ref, start, interval);
}
static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
{
u64 sum = 0;
int i;
/* find the operation which shall be cut */
for (i = 0; i < gcl->count; i++) {
u64 sum_tmp = sum + gcl->operation[i].interval;
u64 interval;
/* sum up operations as long as cycle time is not exceeded */
if (sum_tmp > cycle_time)
break;
/* remaining interval must be big enough for hardware */
interval = cycle_time - sum_tmp;
if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
break;
sum = sum_tmp;
}
if (sum == cycle_time) {
/* no need to cut operation itself or whole cycle
* => change exactly at operation
*/
return tsnep_set_gcl_change(gcl, i, start + sum, false);
}
return tsnep_insert_gcl_operation(gcl, i, start + sum,
cycle_time - sum);
}
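/* Worked example for tsnep_cut_gcl() above (illustration only), assuming
 * TSNEP_GCL_MIN_INTERVAL is not larger than 150us: with intervals 100us,
 * 200us and 300us and a requested cycle_time of 450us, operations 0 and 1
 * are kept (sum = 300us) and operation 2 would exceed the cycle, so an
 * extra operation with interval 450us - 300us = 150us is inserted and the
 * change to the new list happens exactly 450us after the cut cycle starts.
 */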
static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
{
u64 system_time;
u64 timeout;
u64 limit;
/* estimate the timeout limit after timeout enable; the actual timeout
 * limit in hardware will be earlier than this estimate, so we are on the
 * safe side
 */
tsnep_get_system_time(adapter, &system_time);
timeout = system_time + TSNEP_GC_TIMEOUT;
if (curr)
limit = timeout + curr->change_limit;
else
limit = timeout;
gcl->start_time = tsnep_gcl_start_after(gcl, limit);
/* the gate control time register is only 32 bits wide => the time shall
 * be in the near future (the driver does not support times in the far
 * future)
 */
if ((gcl->start_time - system_time) >= U32_MAX)
return -EAGAIN;
if (curr) {
/* change gate control list */
u64 last;
u64 change;
last = tsnep_gcl_start_before(curr, gcl->start_time);
if ((last + curr->cycle_time) == gcl->start_time)
change = tsnep_cut_gcl(curr, last,
gcl->start_time - last);
else if (((gcl->start_time - last) <=
curr->cycle_time_extension) ||
((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
change = tsnep_extend_gcl(curr, last,
gcl->start_time - last);
else
change = tsnep_cut_gcl(curr, last,
gcl->start_time - last);
WARN_ON(change <= timeout);
gcl->change = true;
iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
} else {
/* start gate control list */
WARN_ON(gcl->start_time <= timeout);
gcl->change = false;
iowrite32(gcl->start_time & 0xFFFFFFFF,
adapter->addr + TSNEP_GC_TIME);
}
return 0;
}
static int tsnep_taprio(struct tsnep_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
struct tsnep_gcl *gcl;
struct tsnep_gcl *curr;
int retval;
if (!adapter->gate_control)
return -EOPNOTSUPP;
if (qopt->cmd == TAPRIO_CMD_DESTROY) {
/* disable gate control if active */
mutex_lock(&adapter->gate_control_lock);
if (adapter->gate_control_active) {
iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
adapter->gate_control_active = false;
}
mutex_unlock(&adapter->gate_control_lock);
return 0;
} else if (qopt->cmd != TAPRIO_CMD_REPLACE) {
return -EOPNOTSUPP;
}
retval = tsnep_validate_gcl(qopt);
if (retval)
return retval;
mutex_lock(&adapter->gate_control_lock);
gcl = &adapter->gcl[adapter->next_gcl];
tsnep_write_gcl(gcl, qopt);
/* select current gate control list if active */
if (adapter->gate_control_active) {
if (adapter->next_gcl == 0)
curr = &adapter->gcl[1];
else
curr = &adapter->gcl[0];
} else {
curr = NULL;
}
for (;;) {
/* start timeout which discards a late enable; this helps ensure
 * that start/change time are in the future at enable
 */
iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
retval = tsnep_enable_gcl(adapter, gcl, curr);
if (retval) {
mutex_unlock(&adapter->gate_control_lock);
return retval;
}
/* enable gate control list */
if (adapter->next_gcl == 0)
iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
else
iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
/* done if timeout did not happen */
if (!(ioread32(adapter->addr + TSNEP_GC) &
TSNEP_GC_TIMEOUT_SIGNAL))
break;
/* timeout is acknowledged with any enable */
iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
if (curr)
tsnep_clean_gcl(curr);
/* retry because of timeout */
}
adapter->gate_control_active = true;
if (adapter->next_gcl == 0)
adapter->next_gcl = 1;
else
adapter->next_gcl = 0;
mutex_unlock(&adapter->gate_control_lock);
return 0;
}
static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
struct tc_query_caps_base *base)
{
switch (base->type) {
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
if (!adapter->gate_control)
return -EOPNOTSUPP;
caps->gate_mask_per_txq = true;
return 0;
}
default:
return -EOPNOTSUPP;
}
}
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (type) {
case TC_QUERY_CAPS:
return tsnep_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return tsnep_taprio(adapter, type_data);
default:
return -EOPNOTSUPP;
}
}
int tsnep_tc_init(struct tsnep_adapter *adapter)
{
if (!adapter->gate_control)
return 0;
/* open all gates */
iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
return 0;
}
void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
{
if (!adapter->gate_control)
return;
if (adapter->gate_control_active) {
iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
adapter->gate_control_active = false;
}
}
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_tc.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <[email protected]> */
#include "tsnep.h"
static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
"rx_packets",
"rx_bytes",
"rx_dropped",
"rx_multicast",
"rx_alloc_failed",
"rx_phy_errors",
"rx_forwarded_phy_errors",
"rx_invalid_frame_errors",
"tx_packets",
"tx_bytes",
"tx_dropped",
};
struct tsnep_stats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
u64 rx_multicast;
u64 rx_alloc_failed;
u64 rx_phy_errors;
u64 rx_forwarded_phy_errors;
u64 rx_invalid_frame_errors;
u64 tx_packets;
u64 tx_bytes;
u64 tx_dropped;
};
#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"rx_%d_packets",
"rx_%d_bytes",
"rx_%d_dropped",
"rx_%d_multicast",
"rx_%d_alloc_failed",
"rx_%d_no_descriptor_errors",
"rx_%d_buffer_too_small_errors",
"rx_%d_fifo_overflow_errors",
"rx_%d_invalid_frame_errors",
};
struct tsnep_rx_queue_stats {
u64 rx_packets;
u64 rx_bytes;
u64 rx_dropped;
u64 rx_multicast;
u64 rx_alloc_failed;
u64 rx_no_descriptor_errors;
u64 rx_buffer_too_small_errors;
u64 rx_fifo_overflow_errors;
u64 rx_invalid_frame_errors;
};
#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
sizeof(u64))
static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
"tx_%d_packets",
"tx_%d_bytes",
"tx_%d_dropped",
};
struct tsnep_tx_queue_stats {
u64 tx_packets;
u64 tx_bytes;
u64 tx_dropped;
};
#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
sizeof(u64))
static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
sizeof(drvinfo->bus_info));
}
static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int len;
int num_additional_queues;
len = TSNEP_MAC_SIZE;
/* the first queue pair is covered by TSNEP_MAC_SIZE; only queues beyond
 * the first queue pair extend the register length, by TSNEP_QUEUE_SIZE each
 */
num_additional_queues =
max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
len += TSNEP_QUEUE_SIZE * num_additional_queues;
return len;
}
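/* Worked example for tsnep_ethtool_get_regs_len() above (illustration
 * only): with 4 TX queues and 2 RX queues, max(4, 2) - 1 = 3 queues follow
 * the first queue pair, so the reported register length is
 * TSNEP_MAC_SIZE + 3 * TSNEP_QUEUE_SIZE.
 */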
static void tsnep_ethtool_get_regs(struct net_device *netdev,
struct ethtool_regs *regs,
void *p)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
regs->version = 1;
memcpy_fromio(p, adapter->addr, regs->len);
}
static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable;
}
static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
}
static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int rx_count = adapter->num_rx_queues;
int tx_count = adapter->num_tx_queues;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
data += sizeof(tsnep_stats_strings);
for (i = 0; i < rx_count; i++) {
for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
snprintf(data, ETH_GSTRING_LEN,
tsnep_rx_queue_stats_strings[j], i);
data += ETH_GSTRING_LEN;
}
}
for (i = 0; i < tx_count; i++) {
for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
snprintf(data, ETH_GSTRING_LEN,
tsnep_tx_queue_stats_strings[j], i);
data += ETH_GSTRING_LEN;
}
}
break;
case ETH_SS_TEST:
tsnep_ethtool_get_test_strings(data);
break;
}
}
static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats,
u64 *data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int rx_count = adapter->num_rx_queues;
int tx_count = adapter->num_tx_queues;
struct tsnep_stats tsnep_stats;
struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
u32 reg;
int i;
memset(&tsnep_stats, 0, sizeof(tsnep_stats));
for (i = 0; i < adapter->num_rx_queues; i++) {
tsnep_stats.rx_packets += adapter->rx[i].packets;
tsnep_stats.rx_bytes += adapter->rx[i].bytes;
tsnep_stats.rx_dropped += adapter->rx[i].dropped;
tsnep_stats.rx_multicast += adapter->rx[i].multicast;
tsnep_stats.rx_alloc_failed += adapter->rx[i].alloc_failed;
}
reg = ioread32(adapter->addr + ECM_STAT);
tsnep_stats.rx_phy_errors =
(reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
tsnep_stats.rx_forwarded_phy_errors =
(reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
tsnep_stats.rx_invalid_frame_errors =
(reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
for (i = 0; i < adapter->num_tx_queues; i++) {
tsnep_stats.tx_packets += adapter->tx[i].packets;
tsnep_stats.tx_bytes += adapter->tx[i].bytes;
tsnep_stats.tx_dropped += adapter->tx[i].dropped;
}
memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
data += TSNEP_STATS_COUNT;
for (i = 0; i < rx_count; i++) {
memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
tsnep_rx_queue_stats.rx_alloc_failed =
adapter->rx[i].alloc_failed;
reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
TSNEP_RX_STATISTIC);
tsnep_rx_queue_stats.rx_no_descriptor_errors =
(reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
tsnep_rx_queue_stats.rx_buffer_too_small_errors =
(reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
tsnep_rx_queue_stats.rx_fifo_overflow_errors =
(reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
tsnep_rx_queue_stats.rx_invalid_frame_errors =
(reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
memcpy(data, &tsnep_rx_queue_stats,
sizeof(tsnep_rx_queue_stats));
data += TSNEP_RX_QUEUE_STATS_COUNT;
}
for (i = 0; i < tx_count; i++) {
memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
memcpy(data, &tsnep_tx_queue_stats,
sizeof(tsnep_tx_queue_stats));
data += TSNEP_TX_QUEUE_STATS_COUNT;
}
}
static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int rx_count;
int tx_count;
switch (sset) {
case ETH_SS_STATS:
rx_count = adapter->num_rx_queues;
tx_count = adapter->num_tx_queues;
return TSNEP_STATS_COUNT +
TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
case ETH_SS_TEST:
return tsnep_ethtool_get_test_count();
default:
return -EOPNOTSUPP;
}
}
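/* Worked example for tsnep_ethtool_get_sset_count() above (illustration
 * only): with 2 RX queues and 2 TX queues the ETH_SS_STATS count is
 * 11 + 9 * 2 + 3 * 2 = 35, which must match the number of u64 values
 * written by tsnep_ethtool_get_ethtool_stats() and the number of strings
 * emitted by tsnep_ethtool_get_strings().
 */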
static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = adapter->num_rx_queues;
return 0;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = adapter->rxnfc_count;
cmd->data = adapter->rxnfc_max;
cmd->data |= RX_CLS_LOC_SPECIAL;
return 0;
case ETHTOOL_GRXCLSRULE:
return tsnep_rxnfc_get_rule(adapter, cmd);
case ETHTOOL_GRXCLSRLALL:
return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
default:
return -EOPNOTSUPP;
}
}
static int tsnep_ethtool_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
return tsnep_rxnfc_add_rule(adapter, cmd);
case ETHTOOL_SRXCLSRLDEL:
return tsnep_rxnfc_del_rule(adapter, cmd);
default:
return -EOPNOTSUPP;
}
}
static void tsnep_ethtool_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
ch->max_combined = adapter->num_queues;
ch->combined_count = adapter->num_queues;
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
else
info->phc_index = -1;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
static struct tsnep_queue *tsnep_get_queue_with_tx(struct tsnep_adapter *adapter,
int index)
{
int i;
for (i = 0; i < adapter->num_queues; i++) {
if (adapter->queue[i].tx) {
if (index == 0)
return &adapter->queue[i];
index--;
}
}
return NULL;
}
static struct tsnep_queue *tsnep_get_queue_with_rx(struct tsnep_adapter *adapter,
int index)
{
int i;
for (i = 0; i < adapter->num_queues; i++) {
if (adapter->queue[i].rx) {
if (index == 0)
return &adapter->queue[i];
index--;
}
}
return NULL;
}
static int tsnep_ethtool_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct tsnep_queue *queue;
queue = tsnep_get_queue_with_rx(adapter, 0);
if (queue)
ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
queue = tsnep_get_queue_with_tx(adapter, 0);
if (queue)
ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
return 0;
}
static int tsnep_ethtool_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
int retval;
for (i = 0; i < adapter->num_queues; i++) {
/* RX coalesce has priority for queues with TX and RX */
if (adapter->queue[i].rx)
retval = tsnep_set_irq_coalesce(&adapter->queue[i],
ec->rx_coalesce_usecs);
else
retval = tsnep_set_irq_coalesce(&adapter->queue[i],
ec->tx_coalesce_usecs);
if (retval != 0)
return retval;
}
return 0;
}
static int tsnep_ethtool_get_per_queue_coalesce(struct net_device *netdev,
u32 queue,
struct ethtool_coalesce *ec)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct tsnep_queue *queue_with_rx;
struct tsnep_queue *queue_with_tx;
if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
return -EINVAL;
queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
if (queue_with_rx)
ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_rx);
queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
if (queue_with_tx)
ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_tx);
return 0;
}
static int tsnep_ethtool_set_per_queue_coalesce(struct net_device *netdev,
u32 queue,
struct ethtool_coalesce *ec)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct tsnep_queue *queue_with_rx;
struct tsnep_queue *queue_with_tx;
int retval;
if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
return -EINVAL;
queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
if (queue_with_rx) {
retval = tsnep_set_irq_coalesce(queue_with_rx, ec->rx_coalesce_usecs);
if (retval != 0)
return retval;
}
/* RX coalesce has priority for queues with TX and RX */
queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
if (queue_with_tx && !queue_with_tx->rx) {
retval = tsnep_set_irq_coalesce(queue_with_tx, ec->tx_coalesce_usecs);
if (retval != 0)
return retval;
}
return 0;
}
const struct ethtool_ops tsnep_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = tsnep_ethtool_get_drvinfo,
.get_regs_len = tsnep_ethtool_get_regs_len,
.get_regs = tsnep_ethtool_get_regs,
.get_msglevel = tsnep_ethtool_get_msglevel,
.set_msglevel = tsnep_ethtool_set_msglevel,
.nway_reset = phy_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
.self_test = tsnep_ethtool_self_test,
.get_strings = tsnep_ethtool_get_strings,
.get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
.get_sset_count = tsnep_ethtool_get_sset_count,
.get_rxnfc = tsnep_ethtool_get_rxnfc,
.set_rxnfc = tsnep_ethtool_set_rxnfc,
.get_channels = tsnep_ethtool_get_channels,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_coalesce = tsnep_ethtool_get_coalesce,
.set_coalesce = tsnep_ethtool_set_coalesce,
.get_per_queue_coalesce = tsnep_ethtool_get_per_queue_coalesce,
.set_per_queue_coalesce = tsnep_ethtool_set_per_queue_coalesce,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
|
linux-master
|
drivers/net/ethernet/engleder/tsnep_ethtool.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Ethtool support for Mellanox Gigabit Ethernet driver
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/phy.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
/* Start of struct ethtool_ops functions */
static int mlxbf_gige_get_regs_len(struct net_device *netdev)
{
return MLXBF_GIGE_MMIO_REG_SZ;
}
static void mlxbf_gige_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
regs->version = MLXBF_GIGE_REGS_VERSION;
/* Read entire MMIO register space and store results
* into the provided buffer. By design, a read to an
* offset without an existing register will be
* acknowledged and return zero.
*/
memcpy_fromio(p, priv->base, MLXBF_GIGE_MMIO_REG_SZ);
}
static void
mlxbf_gige_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ering,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
ering->rx_max_pending = MLXBF_GIGE_MAX_RXQ_SZ;
ering->tx_max_pending = MLXBF_GIGE_MAX_TXQ_SZ;
ering->rx_pending = priv->rx_q_entries;
ering->tx_pending = priv->tx_q_entries;
}
static const struct {
const char string[ETH_GSTRING_LEN];
} mlxbf_gige_ethtool_stats_keys[] = {
{ "hw_access_errors" },
{ "tx_invalid_checksums" },
{ "tx_small_frames" },
{ "tx_index_errors" },
{ "sw_config_errors" },
{ "sw_access_errors" },
{ "rx_truncate_errors" },
{ "rx_mac_errors" },
{ "rx_din_dropped_pkts" },
{ "tx_fifo_full" },
{ "rx_filter_passed_pkts" },
{ "rx_filter_discard_pkts" },
};
static int mlxbf_gige_get_sset_count(struct net_device *netdev, int stringset)
{
if (stringset != ETH_SS_STATS)
return -EOPNOTSUPP;
return ARRAY_SIZE(mlxbf_gige_ethtool_stats_keys);
}
static void mlxbf_gige_get_strings(struct net_device *netdev, u32 stringset,
u8 *buf)
{
if (stringset != ETH_SS_STATS)
return;
memcpy(buf, &mlxbf_gige_ethtool_stats_keys,
sizeof(mlxbf_gige_ethtool_stats_keys));
}
static void mlxbf_gige_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *estats,
u64 *data)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
/* Fill data array with interface statistics
*
* NOTE: the data writes must be in
* sync with the strings shown in
* the mlxbf_gige_ethtool_stats_keys[] array
*
* NOTE2: certain statistics below are zeroed upon
* port disable, so the calculation below
* must include the "cached" value of the stat
* plus the value read directly from hardware.
* Cached statistics are currently:
* rx_din_dropped_pkts
* rx_filter_passed_pkts
* rx_filter_discard_pkts
*/
*data++ = priv->stats.hw_access_errors;
*data++ = priv->stats.tx_invalid_checksums;
*data++ = priv->stats.tx_small_frames;
*data++ = priv->stats.tx_index_errors;
*data++ = priv->stats.sw_config_errors;
*data++ = priv->stats.sw_access_errors;
*data++ = priv->stats.rx_truncate_errors;
*data++ = priv->stats.rx_mac_errors;
*data++ = (priv->stats.rx_din_dropped_pkts +
readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER));
*data++ = priv->stats.tx_fifo_full;
*data++ = (priv->stats.rx_filter_passed_pkts +
readq(priv->base + MLXBF_GIGE_RX_PASS_COUNTER_ALL));
*data++ = (priv->stats.rx_filter_discard_pkts +
readq(priv->base + MLXBF_GIGE_RX_DISC_COUNTER_ALL));
}
static void mlxbf_gige_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
pause->autoneg = AUTONEG_DISABLE;
pause->rx_pause = 1;
pause->tx_pause = 1;
}
const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = mlxbf_gige_get_ringparam,
.get_regs_len = mlxbf_gige_get_regs_len,
.get_regs = mlxbf_gige_get_regs,
.get_strings = mlxbf_gige_get_strings,
.get_sset_count = mlxbf_gige_get_sset_count,
.get_ethtool_stats = mlxbf_gige_get_ethtool_stats,
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Packet transmit logic for Mellanox Gigabit Ethernet driver
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
/* Transmit Initialization
* 1) Allocates TX WQE array using coherent DMA mapping
* 2) Allocates TX completion counter using coherent DMA mapping
*/
int mlxbf_gige_tx_init(struct mlxbf_gige *priv)
{
size_t size;
size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
priv->tx_wqe_base = dma_alloc_coherent(priv->dev, size,
&priv->tx_wqe_base_dma,
GFP_KERNEL);
if (!priv->tx_wqe_base)
return -ENOMEM;
priv->tx_wqe_next = priv->tx_wqe_base;
/* Write TX WQE base address into MMIO reg */
writeq(priv->tx_wqe_base_dma, priv->base + MLXBF_GIGE_TX_WQ_BASE);
/* Allocate address for TX completion count */
priv->tx_cc = dma_alloc_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
&priv->tx_cc_dma, GFP_KERNEL);
if (!priv->tx_cc) {
dma_free_coherent(priv->dev, size,
priv->tx_wqe_base, priv->tx_wqe_base_dma);
return -ENOMEM;
}
/* Write TX CC base address into MMIO reg */
writeq(priv->tx_cc_dma, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
writeq(ilog2(priv->tx_q_entries),
priv->base + MLXBF_GIGE_TX_WQ_SIZE_LOG2);
priv->prev_tx_ci = 0;
priv->tx_pi = 0;
return 0;
}
/* Transmit Deinitialization
* This routine will free allocations done by mlxbf_gige_tx_init(),
* namely the TX WQE array and the TX completion counter
*/
void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
{
u64 *tx_wqe_addr;
size_t size;
int i;
tx_wqe_addr = priv->tx_wqe_base;
for (i = 0; i < priv->tx_q_entries; i++) {
if (priv->tx_skb[i]) {
dma_unmap_single(priv->dev, *tx_wqe_addr,
priv->tx_skb[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(priv->tx_skb[i]);
priv->tx_skb[i] = NULL;
}
tx_wqe_addr += 2;
}
size = MLXBF_GIGE_TX_WQE_SZ * priv->tx_q_entries;
dma_free_coherent(priv->dev, size,
priv->tx_wqe_base, priv->tx_wqe_base_dma);
dma_free_coherent(priv->dev, MLXBF_GIGE_TX_CC_SZ,
priv->tx_cc, priv->tx_cc_dma);
priv->tx_wqe_base = NULL;
priv->tx_wqe_base_dma = 0;
priv->tx_cc = NULL;
priv->tx_cc_dma = 0;
priv->tx_wqe_next = NULL;
writeq(0, priv->base + MLXBF_GIGE_TX_WQ_BASE);
writeq(0, priv->base + MLXBF_GIGE_TX_CI_UPDATE_ADDRESS);
}
/* Function that returns the status of the TX ring:
 *        0: TX ring is full, i.e. there are no
 *           unused entries available in the TX ring.
 * non-zero: TX ring is not full, i.e. there are
 *           some available entries in the TX ring.
 *           The non-zero value is a measure of
 *           how many TX entries are available, but
 *           it is not the exact number of available
 *           entries (see below).
 *
 * The algorithm assumes that if (prev_tx_ci == tx_pi) then the TX ring
 * is empty. One entry is always kept unused, so an empty ring reports
 * (tx_q_entries - 1) available entries, which allows the algorithm to
 * differentiate an empty ring from a full ring.
 * (a worked example follows this function)
 */
static u16 mlxbf_gige_tx_buffs_avail(struct mlxbf_gige *priv)
{
unsigned long flags;
u16 avail;
spin_lock_irqsave(&priv->lock, flags);
if (priv->prev_tx_ci == priv->tx_pi)
avail = priv->tx_q_entries - 1;
else
avail = ((priv->tx_q_entries + priv->prev_tx_ci - priv->tx_pi)
% priv->tx_q_entries) - 1;
spin_unlock_irqrestore(&priv->lock, flags);
return avail;
}
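/* Worked example for mlxbf_gige_tx_buffs_avail() above (illustration only),
 * assuming tx_q_entries = 8:
 *   prev_tx_ci == tx_pi       -> ring empty, avail = 8 - 1 = 7
 *   prev_tx_ci = 2, tx_pi = 5 -> avail = ((8 + 2 - 5) % 8) - 1 = 4
 *   prev_tx_ci = 6, tx_pi = 5 -> avail = ((8 + 6 - 5) % 8) - 1 = 0 (full)
 * The same arithmetic as a standalone sketch (not compiled, example only):
 */
#if 0
static u16 example_tx_buffs_avail(u16 ci, u16 pi, u16 entries)
{
	if (ci == pi)
		return entries - 1;
	return ((entries + ci - pi) % entries) - 1;
}
#endif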
bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
{
struct net_device_stats *stats;
u16 tx_wqe_index;
u64 *tx_wqe_addr;
u64 tx_status;
u16 tx_ci;
tx_status = readq(priv->base + MLXBF_GIGE_TX_STATUS);
if (tx_status & MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL)
priv->stats.tx_fifo_full++;
tx_ci = readq(priv->base + MLXBF_GIGE_TX_CONSUMER_INDEX);
stats = &priv->netdev->stats;
/* Transmit completion logic needs to loop until the completion
* index (in SW) equals TX consumer index (from HW). These
* parameters are unsigned 16-bit values and the wrap case needs
* to be supported, that is TX consumer index wrapped from 0xFFFF
* to 0 while TX completion index is still < 0xFFFF.
*/
for (; priv->prev_tx_ci != tx_ci; priv->prev_tx_ci++) {
tx_wqe_index = priv->prev_tx_ci % priv->tx_q_entries;
/* Each TX WQE is 16 bytes: the first 64-bit word stores the 2KB TX
 * buffer address and the second 64-bit word contains information
 * about the TX WQE (e.g. the packet length).
 */
tx_wqe_addr = priv->tx_wqe_base +
(tx_wqe_index * MLXBF_GIGE_TX_WQE_SZ_QWORDS);
stats->tx_packets++;
stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);
dma_unmap_single(priv->dev, *tx_wqe_addr,
priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
priv->tx_skb[tx_wqe_index] = NULL;
/* Ensure completion of updates across all cores */
mb();
}
/* Since the TX ring was likely just drained, check whether the TX queue
 * had previously been stopped; now that TX buffers are available again,
 * the TX queue can be awakened.
 */
if (netif_queue_stopped(priv->netdev) &&
mlxbf_gige_tx_buffs_avail(priv))
netif_wake_queue(priv->netdev);
return true;
}
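/* Worked example for the wrap handling in mlxbf_gige_handle_tx_complete()
 * above (illustration only): if prev_tx_ci is 0xFFFE and the hardware
 * consumer index has advanced to 0x0001, the loop still visits 0xFFFE,
 * 0xFFFF and 0x0000 because the 16-bit completion index wraps naturally;
 * the WQE slot for each step is taken modulo tx_q_entries.
 */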
/* Function to advance the tx_wqe_next pointer to next TX WQE */
void mlxbf_gige_update_tx_wqe_next(struct mlxbf_gige *priv)
{
/* Advance tx_wqe_next pointer */
priv->tx_wqe_next += MLXBF_GIGE_TX_WQE_SZ_QWORDS;
/* Check if 'next' pointer is beyond end of TX ring */
/* If so, set 'next' back to 'base' pointer of ring */
if (priv->tx_wqe_next == (priv->tx_wqe_base +
(priv->tx_q_entries * MLXBF_GIGE_TX_WQE_SZ_QWORDS)))
priv->tx_wqe_next = priv->tx_wqe_base;
}
netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
long buff_addr, start_dma_page, end_dma_page;
struct sk_buff *tx_skb;
dma_addr_t tx_buf_dma;
unsigned long flags;
u64 *tx_wqe_addr;
u64 word2;
/* If needed, linearize TX SKB as hardware DMA expects this */
if (skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ || skb_linearize(skb)) {
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
buff_addr = (long)skb->data;
start_dma_page = buff_addr >> MLXBF_GIGE_DMA_PAGE_SHIFT;
end_dma_page = (buff_addr + skb->len - 1) >> MLXBF_GIGE_DMA_PAGE_SHIFT;
/* Verify that the payload pointer and data length of the SKB to be
 * transmitted do not violate the hardware DMA limitation.
 */
if (start_dma_page != end_dma_page) {
/* DMA operation would fail as-is, alloc new aligned SKB */
tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
&tx_buf_dma, DMA_TO_DEVICE);
if (!tx_skb) {
/* Free original skb, could not alloc new aligned SKB */
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
skb_put_data(tx_skb, skb->data, skb->len);
/* Free the original SKB */
dev_kfree_skb(skb);
} else {
tx_skb = skb;
tx_buf_dma = dma_map_single(priv->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->dev, tx_buf_dma)) {
dev_kfree_skb(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
}
/* Get address of TX WQE */
tx_wqe_addr = priv->tx_wqe_next;
mlxbf_gige_update_tx_wqe_next(priv);
/* Put PA of buffer address into first 64-bit word of TX WQE */
*tx_wqe_addr = tx_buf_dma;
/* Set TX WQE pkt_len appropriately
* NOTE: GigE silicon will automatically pad up to
* minimum packet length if needed.
*/
word2 = tx_skb->len & MLXBF_GIGE_TX_WQE_PKT_LEN_MASK;
/* Write entire 2nd word of TX WQE */
*(tx_wqe_addr + 1) = word2;
spin_lock_irqsave(&priv->lock, flags);
priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
priv->tx_pi++;
spin_unlock_irqrestore(&priv->lock, flags);
if (!netdev_xmit_more()) {
/* Create memory barrier before write to TX PI */
wmb();
writeq(priv->tx_pi, priv->base + MLXBF_GIGE_TX_PRODUCER_INDEX);
}
/* Check if the last TX entry was just used */
if (!mlxbf_gige_tx_buffs_avail(priv)) {
/* TX ring is full, inform stack */
netif_stop_queue(netdev);
/* Since there is no separate "TX complete" interrupt, the NAPI poll
 * must be scheduled explicitly. This triggers the logic which processes
 * TX completions and will hopefully drain the TX ring, allowing the TX
 * queue to be awakened.
 */
napi_schedule(&priv->napi);
}
return NETDEV_TX_OK;
}
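/* Worked example for the DMA page check in mlxbf_gige_start_xmit() above
 * (illustration only), assuming MLXBF_GIGE_DMA_PAGE_SHIFT is 12 (4KB
 * pages): for skb->data = 0x...0f80 and skb->len = 0x100,
 *   start_dma_page = 0x0f80 >> 12 = 0
 *   end_dma_page   = (0x0f80 + 0x100 - 1) >> 12 = 0x107f >> 12 = 1
 * The pages differ, so the payload would cross a 4KB boundary and a freshly
 * aligned SKB is allocated instead of mapping the original one.
 */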
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* MDIO support for Mellanox Gigabit Ethernet driver
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
#include "mlxbf_gige_mdio_bf2.h"
#include "mlxbf_gige_mdio_bf3.h"
static struct mlxbf_gige_mdio_gw mlxbf_gige_mdio_gw_t[] = {
[MLXBF_GIGE_VERSION_BF2] = {
.gw_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
.read_data_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
.busy = {
.mask = MLXBF2_GIGE_MDIO_GW_BUSY_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT,
},
.read_data = {
.mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
},
.write_data = {
.mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
},
.devad = {
.mask = MLXBF2_GIGE_MDIO_GW_DEVAD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT,
},
.partad = {
.mask = MLXBF2_GIGE_MDIO_GW_PARTAD_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT,
},
.opcode = {
.mask = MLXBF2_GIGE_MDIO_GW_OPCODE_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT,
},
.st1 = {
.mask = MLXBF2_GIGE_MDIO_GW_ST1_MASK,
.shift = MLXBF2_GIGE_MDIO_GW_ST1_SHIFT,
},
},
[MLXBF_GIGE_VERSION_BF3] = {
.gw_address = MLXBF3_GIGE_MDIO_GW_OFFSET,
.read_data_address = MLXBF3_GIGE_MDIO_DATA_READ,
.busy = {
.mask = MLXBF3_GIGE_MDIO_GW_BUSY_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT,
},
.read_data = {
.mask = MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT,
},
.write_data = {
.mask = MLXBF3_GIGE_MDIO_GW_DATA_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DATA_SHIFT,
},
.devad = {
.mask = MLXBF3_GIGE_MDIO_GW_DEVAD_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT,
},
.partad = {
.mask = MLXBF3_GIGE_MDIO_GW_PARTAD_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT,
},
.opcode = {
.mask = MLXBF3_GIGE_MDIO_GW_OPCODE_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT,
},
.st1 = {
.mask = MLXBF3_GIGE_MDIO_GW_ST1_MASK,
.shift = MLXBF3_GIGE_MDIO_GW_ST1_SHIFT,
},
},
};
#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
#define MLXBF_GIGE_MDC_CLK_NS 400
#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
#define MLXBF_GIGE_MDIO_CL22_READ 0x2
/* Busy bit is set by software and cleared by hardware */
#define MLXBF_GIGE_MDIO_SET_BUSY 0x1
#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
#define MLXBF_GIGE_BF3_COREPLL_ADDR 0x13409824
#define MLXBF_GIGE_BF3_COREPLL_SIZE 0x00000010
static struct resource corepll_params[] = {
[MLXBF_GIGE_VERSION_BF2] = {
.start = MLXBF_GIGE_BF2_COREPLL_ADDR,
.end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
},
[MLXBF_GIGE_VERSION_BF3] = {
.start = MLXBF_GIGE_BF3_COREPLL_ADDR,
.end = MLXBF_GIGE_BF3_COREPLL_ADDR + MLXBF_GIGE_BF3_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
}
};
/* Returns core clock i1clk in Hz */
static u64 calculate_i1clk(struct mlxbf_gige *priv)
{
u8 core_od, core_r;
u64 freq_output;
u32 reg1, reg2;
u32 core_f;
reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
MLXBF_GIGE_MDIO_CORE_F_SHIFT;
core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
MLXBF_GIGE_MDIO_CORE_R_SHIFT;
core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
/* Compute PLL output frequency as follow:
*
* CORE_F / 16384
* freq_output = freq_reference * ----------------------------
* (CORE_R + 1) * (CORE_OD + 1)
*/
freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
MLXBF_GIGE_MDIO_COREPLL_CONST);
freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
return freq_output;
}
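/* Worked example for calculate_i1clk() above (illustration only), using
 * hypothetical register values CORE_F = 131072, CORE_R = 0, CORE_OD = 15:
 *   freq_output = 156250000 * (131072 / 16384) / ((0 + 1) * (15 + 1))
 *               = 156250000 * 8 / 16
 *               = 78125000 Hz
 * i.e. an i1clk of 78.125 MHz.
 */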
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
* mdc_clk = 2*(val + 1)*(core clock in sec)
*
* i1clk is in Hz:
* 400 ns = 2*(val + 1)*(1/i1clk)
*
* val = (((400/10^9) / (1/i1clk) / 2) - 1)
* val = (400/2 * i1clk)/10^9 - 1
*/
static u8 mdio_period_map(struct mlxbf_gige *priv)
{
u8 mdio_period;
u64 i1clk;
i1clk = calculate_i1clk(priv);
mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
return mdio_period;
}
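/* Worked example for mdio_period_map() above (illustration only), for the
 * hypothetical i1clk of 78125000 Hz:
 *   val = (400 / 2) * 78125000 / 10^9 - 1 = 15 - 1 = 14
 * which corresponds to an MDC clock period of about
 *   2 * (14 + 1) / 78125000 s = 384 ns, close to the 400 ns target.
 */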
static u32 mlxbf_gige_mdio_create_cmd(struct mlxbf_gige_mdio_gw *mdio_gw, u16 data, int phy_add,
int phy_reg, u32 opcode)
{
u32 gw_reg = 0;
gw_reg |= ((data << mdio_gw->write_data.shift) &
mdio_gw->write_data.mask);
gw_reg |= ((phy_reg << mdio_gw->devad.shift) &
mdio_gw->devad.mask);
gw_reg |= ((phy_add << mdio_gw->partad.shift) &
mdio_gw->partad.mask);
gw_reg |= ((opcode << mdio_gw->opcode.shift) &
mdio_gw->opcode.mask);
gw_reg |= ((MLXBF_GIGE_MDIO_CL22_ST1 << mdio_gw->st1.shift) &
mdio_gw->st1.mask);
gw_reg |= ((MLXBF_GIGE_MDIO_SET_BUSY << mdio_gw->busy.shift) &
mdio_gw->busy.mask);
return gw_reg;
}
static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
{
struct mlxbf_gige *priv = bus->priv;
u32 cmd;
int ret;
u32 val;
/* Send mdio read request */
cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, 0, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_READ);
writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
val, !(val & priv->mdio_gw->busy.mask),
5, 1000000);
if (ret) {
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
ret = readl(priv->mdio_io + priv->mdio_gw->read_data_address);
/* Only return ad bits of the gw register */
ret &= priv->mdio_gw->read_data.mask;
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
u32 temp;
u32 cmd;
int ret;
/* Send mdio write request */
cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, val, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_WRITE);
writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
/* If the poll timed out, drop the request */
ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
temp, !(temp & priv->mdio_gw->busy.mask),
5, 1000000);
/* The MDIO lock is set on read. To release it, clear gw register */
writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
{
u8 mdio_period;
u32 val;
mdio_period = mdio_period_map(priv);
if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
val = MLXBF2_GIGE_MDIO_CFG_VAL;
val |= FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
writel(val, priv->mdio_io + MLXBF2_GIGE_MDIO_CFG_OFFSET);
} else {
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) |
FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG0);
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG1);
val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) |
FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13);
writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG2);
}
}
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
if (priv->hw_version > MLXBF_GIGE_VERSION_BF3)
return -ENODEV;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
/* The clk resource is shared with other drivers, so
 * devm_platform_ioremap_resource() cannot be used
 */
res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
if (!res) {
/* For backward compatibility with older ACPI tables, also keep
* CLK resource internal to the driver.
*/
res = &corepll_params[priv->hw_version];
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->clk_io)
return -ENOMEM;
priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version];
mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
dev_err(dev, "Failed to alloc MDIO bus\n");
return -ENOMEM;
}
priv->mdiobus->name = "mlxbf-mdio";
priv->mdiobus->read = mlxbf_gige_mdio_read;
priv->mdiobus->write = mlxbf_gige_mdio_write;
priv->mdiobus->parent = dev;
priv->mdiobus->priv = priv;
snprintf(priv->mdiobus->id, MII_BUS_ID_SIZE, "%s",
dev_name(dev));
ret = mdiobus_register(priv->mdiobus);
if (ret)
dev_err(dev, "Failed to register MDIO bus\n");
return ret;
}
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv)
{
mdiobus_unregister(priv->mdiobus);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Packet receive logic for Mellanox Gigabit Ethernet driver
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac)
{
void __iomem *base = priv->base;
u64 control;
/* Write destination MAC to specified MAC RX filter */
writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
(index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
control |= (MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
writeq(control, base + MLXBF_GIGE_CONTROL);
}
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{
void __iomem *base = priv->base;
/* Read destination MAC from specified MAC RX filter */
*dmac = readq(base + MLXBF_GIGE_RX_MAC_FILTER +
(index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
}
void mlxbf_gige_enable_promisc(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
u64 control;
u64 end_mac;
/* Enable MAC_ID_RANGE match functionality */
control = readq(base + MLXBF_GIGE_CONTROL);
control |= MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
writeq(control, base + MLXBF_GIGE_CONTROL);
/* Set start of destination MAC range check to 0 */
writeq(0, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START);
/* Set end of destination MAC range check to all FFs */
end_mac = BCAST_MAC_ADDR;
writeq(end_mac, base + MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END);
}
void mlxbf_gige_disable_promisc(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
u64 control;
/* Disable MAC_ID_RANGE match functionality */
control = readq(base + MLXBF_GIGE_CONTROL);
control &= ~MLXBF_GIGE_CONTROL_MAC_ID_RANGE_EN;
writeq(control, base + MLXBF_GIGE_CONTROL);
/* NOTE: no need to change DMAC_RANGE_START or END;
* those values are ignored since MAC_ID_RANGE_EN=0
*/
}
/* Receive Initialization
* 1) Configures RX MAC filters via MMIO registers
* 2) Allocates RX WQE array using coherent DMA mapping
* 3) Initializes each element of RX WQE array with a receive
* buffer pointer (also using coherent DMA mapping)
* 4) Allocates RX CQE array using coherent DMA mapping
* 5) Completes other misc receive initialization
*/
int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
{
size_t wq_size, cq_size;
dma_addr_t *rx_wqe_ptr;
dma_addr_t rx_buf_dma;
u64 data;
int i, j;
/* Configure MAC RX filter #0 to allow RX of broadcast pkts */
mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX,
BCAST_MAC_ADDR);
wq_size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
priv->rx_wqe_base = dma_alloc_coherent(priv->dev, wq_size,
&priv->rx_wqe_base_dma,
GFP_KERNEL);
if (!priv->rx_wqe_base)
return -ENOMEM;
/* Initialize 'rx_wqe_ptr' to point to first RX WQE in array
* Each RX WQE is simply a receive buffer pointer, so walk
* the entire array, allocating a 2KB buffer for each element
*/
rx_wqe_ptr = priv->rx_wqe_base;
for (i = 0; i < priv->rx_q_entries; i++) {
priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
&rx_buf_dma, DMA_FROM_DEVICE);
if (!priv->rx_skb[i])
goto free_wqe_and_skb;
*rx_wqe_ptr++ = rx_buf_dma;
}
/* Write RX WQE base address into MMIO reg */
writeq(priv->rx_wqe_base_dma, priv->base + MLXBF_GIGE_RX_WQ_BASE);
cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,
&priv->rx_cqe_base_dma,
GFP_KERNEL);
if (!priv->rx_cqe_base)
goto free_wqe_and_skb;
for (i = 0; i < priv->rx_q_entries; i++)
priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;
/* Write RX CQE base address into MMIO reg */
writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);
/* Write RX_WQE_PI with current number of replenished buffers */
writeq(priv->rx_q_entries, priv->base + MLXBF_GIGE_RX_WQE_PI);
/* Enable removal of CRC during RX */
data = readq(priv->base + MLXBF_GIGE_RX);
data |= MLXBF_GIGE_RX_STRIP_CRC_EN;
writeq(data, priv->base + MLXBF_GIGE_RX);
/* Enable RX MAC filter pass and discard counters */
writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN,
priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC);
writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN,
priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS);
/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
* indicate readiness to receive interrupts
*/
data = readq(priv->base + MLXBF_GIGE_INT_MASK);
data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
/* Enable RX DMA to write new packets to memory */
data = readq(priv->base + MLXBF_GIGE_RX_DMA);
data |= MLXBF_GIGE_RX_DMA_EN;
writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
writeq(ilog2(priv->rx_q_entries),
priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
return 0;
free_wqe_and_skb:
rx_wqe_ptr = priv->rx_wqe_base;
for (j = 0; j < i; j++) {
dma_unmap_single(priv->dev, *rx_wqe_ptr,
MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_skb[j]);
rx_wqe_ptr++;
}
dma_free_coherent(priv->dev, wq_size,
priv->rx_wqe_base, priv->rx_wqe_base_dma);
return -ENOMEM;
}
/* Receive Deinitialization
* This routine will free allocations done by mlxbf_gige_rx_init(),
* namely the RX WQE and RX CQE arrays, as well as all RX buffers
*/
void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
{
dma_addr_t *rx_wqe_ptr;
size_t size;
u64 data;
int i;
/* Disable RX DMA to prevent packet transfers to memory */
data = readq(priv->base + MLXBF_GIGE_RX_DMA);
data &= ~MLXBF_GIGE_RX_DMA_EN;
writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
rx_wqe_ptr = priv->rx_wqe_base;
for (i = 0; i < priv->rx_q_entries; i++) {
dma_unmap_single(priv->dev, *rx_wqe_ptr, MLXBF_GIGE_DEFAULT_BUF_SZ,
DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_skb[i]);
rx_wqe_ptr++;
}
size = MLXBF_GIGE_RX_WQE_SZ * priv->rx_q_entries;
dma_free_coherent(priv->dev, size,
priv->rx_wqe_base, priv->rx_wqe_base_dma);
size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;
dma_free_coherent(priv->dev, size,
priv->rx_cqe_base, priv->rx_cqe_base_dma);
priv->rx_wqe_base = NULL;
priv->rx_wqe_base_dma = 0;
priv->rx_cqe_base = NULL;
priv->rx_cqe_base_dma = 0;
writeq(0, priv->base + MLXBF_GIGE_RX_WQ_BASE);
writeq(0, priv->base + MLXBF_GIGE_RX_CQ_BASE);
}
static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
{
struct net_device *netdev = priv->netdev;
struct sk_buff *skb = NULL, *rx_skb;
u16 rx_pi_rem, rx_ci_rem;
dma_addr_t *rx_wqe_addr;
dma_addr_t rx_buf_dma;
u64 *rx_cqe_addr;
u64 datalen;
u64 rx_cqe;
u16 rx_ci;
u16 rx_pi;
/* Index into RX buffer array is rx_pi with wrap based on rx_q_entries */
rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
rx_pi_rem = rx_pi % priv->rx_q_entries;
rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
rx_cqe = *rx_cqe_addr;
if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
return false;
if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
/* Packet is OK, increment stats */
datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += datalen;
skb = priv->rx_skb[rx_pi_rem];
/* Alloc another RX SKB for this same index */
rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
&rx_buf_dma, DMA_FROM_DEVICE);
if (!rx_skb)
return false;
priv->rx_skb[rx_pi_rem] = rx_skb;
dma_unmap_single(priv->dev, *rx_wqe_addr,
MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
skb_put(skb, datalen);
skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
skb->protocol = eth_type_trans(skb, netdev);
*rx_wqe_addr = rx_buf_dma;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
priv->stats.rx_mac_errors++;
} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_TRUNCATED) {
priv->stats.rx_truncate_errors++;
}
/* Let hardware know we've replenished one buffer */
rx_pi++;
/* Ensure completion of all writes before notifying HW of replenish */
wmb();
writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);
(*rx_pkts)++;
rx_pi_rem = rx_pi % priv->rx_q_entries;
if (rx_pi_rem == 0)
priv->valid_polarity ^= 1;
rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
rx_ci_rem = rx_ci % priv->rx_q_entries;
if (skb)
netif_receive_skb(skb);
return rx_pi_rem != rx_ci_rem;
}
/* Driver poll() function called by NAPI infrastructure */
int mlxbf_gige_poll(struct napi_struct *napi, int budget)
{
struct mlxbf_gige *priv;
bool remaining_pkts;
int work_done = 0;
u64 data;
priv = container_of(napi, struct mlxbf_gige, napi);
mlxbf_gige_handle_tx_complete(priv);
do {
remaining_pkts = mlxbf_gige_rx_packet(priv, &work_done);
} while (remaining_pkts && work_done < budget);
/* If amount of work done < budget, turn off NAPI polling
* via napi_complete_done(napi, work_done) and then
* re-enable interrupts.
*/
if (work_done < budget && napi_complete_done(napi, work_done)) {
/* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to
* indicate receive readiness
*/
data = readq(priv->base + MLXBF_GIGE_INT_MASK);
data &= ~MLXBF_GIGE_INT_MASK_RX_RECEIVE_PACKET;
writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
}
return work_done;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Gigabit Ethernet driver for Mellanox BlueField SoC
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
/* Allocate SKB whose payload pointer aligns with the Bluefield
* hardware DMA limitation, i.e. DMA operation can't cross
* a 4KB boundary. A maximum packet size of 2KB is assumed in the
* alignment formula. The alignment logic overallocates an SKB,
* and then adjusts the headroom so that the SKB data pointer is
* naturally aligned to a 2KB boundary.
*/
struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
unsigned int map_len,
dma_addr_t *buf_dma,
enum dma_data_direction dir)
{
struct sk_buff *skb;
u64 addr, offset;
/* Overallocate the SKB so that any headroom adjustment (to
* provide 2KB natural alignment) does not exceed payload area
*/
skb = netdev_alloc_skb(priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
if (!skb)
return NULL;
/* Adjust the headroom so that skb->data is naturally aligned to
* a 2KB boundary, which is the maximum packet size supported.
*/
addr = (long)skb->data;
offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
offset -= addr;
if (offset)
skb_reserve(skb, offset);
/* Return streaming DMA mapping to caller */
*buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
if (dma_mapping_error(priv->dev, *buf_dma)) {
dev_kfree_skb(skb);
*buf_dma = (dma_addr_t)0;
return NULL;
}
return skb;
}
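/* Worked example for the headroom alignment in mlxbf_gige_alloc_skb() above
 * (illustration only), assuming MLXBF_GIGE_DEFAULT_BUF_SZ is 2048: if
 * netdev_alloc_skb() happens to return skb->data at address 0x...1100, then
 *   offset = ((0x1100 + 2047) & ~2047) - 0x1100 = 0x1800 - 0x1100 = 0x700
 * and skb_reserve(skb, 0x700) moves skb->data to the 2KB-aligned 0x...1800,
 * so a 2KB buffer cannot cross a 4KB page boundary.
 */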
static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
{
u8 mac[ETH_ALEN];
u64 local_mac;
eth_zero_addr(mac);
mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
&local_mac);
u64_to_ether_addr(local_mac, mac);
if (is_valid_ether_addr(mac)) {
eth_hw_addr_set(priv->netdev, mac);
} else {
/* Provide a random MAC if for some reason the device has
* not been configured with a valid MAC address already.
*/
eth_hw_addr_random(priv->netdev);
}
local_mac = ether_addr_to_u64(priv->netdev->dev_addr);
mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
local_mac);
}
static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
{
struct mlxbf_gige_stats *p;
/* Cache stats that will be cleared by clean port operation */
p = &priv->stats;
p->rx_din_dropped_pkts += readq(priv->base +
MLXBF_GIGE_RX_DIN_DROP_COUNTER);
p->rx_filter_passed_pkts += readq(priv->base +
MLXBF_GIGE_RX_PASS_COUNTER_ALL);
p->rx_filter_discard_pkts += readq(priv->base +
MLXBF_GIGE_RX_DISC_COUNTER_ALL);
}
static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
{
u64 control;
u64 temp;
int err;
/* Set the CLEAN_PORT_EN bit to trigger SW reset */
control = readq(priv->base + MLXBF_GIGE_CONTROL);
control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
/* Ensure completion of "clean port" write before polling status */
mb();
err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
(temp & MLXBF_GIGE_STATUS_READY),
100, 100000);
/* Clear the CLEAN_PORT_EN bit at end of this loop */
control = readq(priv->base + MLXBF_GIGE_CONTROL);
control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
writeq(control, priv->base + MLXBF_GIGE_CONTROL);
return err;
}
static int mlxbf_gige_open(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
u64 int_en;
int err;
err = mlxbf_gige_request_irqs(priv);
if (err)
return err;
mlxbf_gige_cache_stats(priv);
err = mlxbf_gige_clean_port(priv);
if (err)
goto free_irqs;
/* Clear driver's valid_polarity to match hardware,
* since the above call to clean_port() resets the
* receive polarity used by hardware.
*/
priv->valid_polarity = 0;
err = mlxbf_gige_rx_init(priv);
if (err)
goto free_irqs;
err = mlxbf_gige_tx_init(priv);
if (err)
goto rx_deinit;
phy_start(phydev);
netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll);
napi_enable(&priv->napi);
netif_start_queue(netdev);
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
/* Ensure completion of all initialization before enabling interrupts */
mb();
writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
return 0;
rx_deinit:
mlxbf_gige_rx_deinit(priv);
free_irqs:
mlxbf_gige_free_irqs(priv);
return err;
}
static int mlxbf_gige_stop(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
writeq(0, priv->base + MLXBF_GIGE_INT_EN);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
netif_napi_del(&priv->napi);
mlxbf_gige_free_irqs(priv);
phy_stop(netdev->phydev);
mlxbf_gige_rx_deinit(priv);
mlxbf_gige_tx_deinit(priv);
mlxbf_gige_cache_stats(priv);
mlxbf_gige_clean_port(priv);
return 0;
}
static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
return -EINVAL;
return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
bool new_promisc_enabled;
new_promisc_enabled = netdev->flags & IFF_PROMISC;
/* Only write to the hardware registers if the new setting
* of promiscuous mode is different from the current one.
*/
if (new_promisc_enabled != priv->promisc_enabled) {
priv->promisc_enabled = new_promisc_enabled;
if (new_promisc_enabled)
mlxbf_gige_enable_promisc(priv);
else
mlxbf_gige_disable_promisc(priv);
}
}
static void mlxbf_gige_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
netdev_stats_to_stats64(stats, &netdev->stats);
stats->rx_length_errors = priv->stats.rx_truncate_errors;
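/* Combine drops cached across clean-port operations with the live DIN drop counter */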
stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
stats->rx_crc_errors = priv->stats.rx_mac_errors;
stats->rx_errors = stats->rx_length_errors +
stats->rx_fifo_errors +
stats->rx_crc_errors;
stats->tx_fifo_errors = priv->stats.tx_fifo_full;
stats->tx_errors = stats->tx_fifo_errors;
}
static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_open = mlxbf_gige_open,
.ndo_stop = mlxbf_gige_stop,
.ndo_start_xmit = mlxbf_gige_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = mlxbf_gige_eth_ioctl,
.ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
{
struct phy_device *phydev = netdev->phydev;
phy_print_status(phydev);
}
static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
{
struct mlxbf_gige *priv = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
u8 sgmii_mode;
u16 ipg_size;
u32 val;
if (phydev->link && phydev->speed != priv->prev_speed) {
switch (phydev->speed) {
case 1000:
ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
break;
case 100:
ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
break;
case 10:
ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
break;
default:
return;
}
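/* Program the PLU TX/RX registers with the IPG size and SGMII mode that match the new link speed */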
val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
priv->prev_speed = phydev->speed;
}
phy_print_status(phydev);
}
static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
{
/* MAC only supports 1000T full duplex mode */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
/* Only symmetric pause with flow control enabled is supported so no
* need to negotiate pause.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
}
static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
{
/* MAC only supports full duplex mode */
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
/* Only symmetric pause with flow control enabled is supported so no
* need to negotiate pause.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
}
static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
[MLXBF_GIGE_VERSION_BF2] = {
.set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
.adjust_link = mlxbf_gige_bf2_adjust_link,
.phy_mode = PHY_INTERFACE_MODE_GMII
},
[MLXBF_GIGE_VERSION_BF3] = {
.set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
.adjust_link = mlxbf_gige_bf3_adjust_link,
.phy_mode = PHY_INTERFACE_MODE_SGMII
}
};
static int mlxbf_gige_probe(struct platform_device *pdev)
{
struct phy_device *phydev;
struct net_device *netdev;
struct mlxbf_gige *priv;
void __iomem *llu_base;
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
u64 control;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
if (IS_ERR(base))
return PTR_ERR(base);
llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU);
if (IS_ERR(llu_base))
return PTR_ERR(llu_base);
plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU);
if (IS_ERR(plu_base))
return PTR_ERR(plu_base);
/* Perform general init of GigE block */
control = readq(base + MLXBF_GIGE_CONTROL);
control |= MLXBF_GIGE_CONTROL_PORT_EN;
writeq(control, base + MLXBF_GIGE_CONTROL);
netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
if (!netdev)
return -ENOMEM;
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->netdev_ops = &mlxbf_gige_netdev_ops;
netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
priv = netdev_priv(netdev);
priv->netdev = netdev;
platform_set_drvdata(pdev, priv);
priv->dev = &pdev->dev;
priv->pdev = pdev;
spin_lock_init(&priv->lock);
priv->hw_version = readq(base + MLXBF_GIGE_VERSION);
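/* The hardware version selects between the BF2 (GMII) and BF3 (SGMII) link configurations */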
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
if (err)
return err;
priv->base = base;
priv->llu_base = llu_base;
priv->plu_base = plu_base;
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
goto out;
}
priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
if (phy_irq < 0) {
dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
phy_irq = PHY_POLL;
}
phydev = phy_find_first(priv->mdiobus);
if (!phydev) {
err = -ENODEV;
goto out;
}
addr = phydev->mdio.addr;
priv->mdiobus->irq[addr] = phy_irq;
phydev->irq = phy_irq;
err = phy_connect_direct(netdev, phydev,
mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
dev_err(&pdev->dev, "Could not attach to PHY\n");
goto out;
}
mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
/* Display information about attached PHY device */
phy_attached_info(phydev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
phy_disconnect(phydev);
goto out;
}
return 0;
out:
mlxbf_gige_mdio_remove(priv);
return err;
}
static int mlxbf_gige_remove(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
unregister_netdev(priv->netdev);
phy_disconnect(priv->netdev->phydev);
mlxbf_gige_mdio_remove(priv);
platform_set_drvdata(pdev, NULL);
return 0;
}
static void mlxbf_gige_shutdown(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
writeq(0, priv->base + MLXBF_GIGE_INT_EN);
mlxbf_gige_clean_port(priv);
}
static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
{ "MLNXBF17", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
static struct platform_driver mlxbf_gige_driver = {
.probe = mlxbf_gige_probe,
.remove = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
.name = KBUILD_MODNAME,
.acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
},
};
module_platform_driver(mlxbf_gige_driver);
MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
MODULE_AUTHOR("David Thompson <[email protected]>");
MODULE_AUTHOR("Asmaa Mnebhi <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
|
// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/* Interrupt related logic for Mellanox Gigabit Ethernet driver
*
* Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
*/
#include <linux/interrupt.h>
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
static irqreturn_t mlxbf_gige_error_intr(int irq, void *dev_id)
{
struct mlxbf_gige *priv;
u64 int_status;
priv = dev_id;
int_status = readq(priv->base + MLXBF_GIGE_INT_STATUS);
if (int_status & MLXBF_GIGE_INT_STATUS_HW_ACCESS_ERROR)
priv->stats.hw_access_errors++;
if (int_status & MLXBF_GIGE_INT_STATUS_TX_CHECKSUM_INPUTS) {
priv->stats.tx_invalid_checksums++;
/* This error condition is latched into MLXBF_GIGE_INT_STATUS
* when the GigE silicon operates on the offending
* TX WQE. The write to MLXBF_GIGE_INT_STATUS at the bottom
* of this routine clears this error condition.
*/
}
if (int_status & MLXBF_GIGE_INT_STATUS_TX_SMALL_FRAME_SIZE) {
priv->stats.tx_small_frames++;
/* This condition happens when the networking stack invokes
* this driver's "start_xmit()" method with a packet whose
* size < 60 bytes. The GigE silicon will automatically pad
* this small frame up to a minimum-sized frame before it is
* sent. The "tx_small_frame" condition is latched into the
* MLXBF_GIGE_INT_STATUS register when the GigE silicon
* operates on the offending TX WQE. The write to
* MLXBF_GIGE_INT_STATUS at the bottom of this routine
* clears this condition.
*/
}
if (int_status & MLXBF_GIGE_INT_STATUS_TX_PI_CI_EXCEED_WQ_SIZE)
priv->stats.tx_index_errors++;
if (int_status & MLXBF_GIGE_INT_STATUS_SW_CONFIG_ERROR)
priv->stats.sw_config_errors++;
if (int_status & MLXBF_GIGE_INT_STATUS_SW_ACCESS_ERROR)
priv->stats.sw_access_errors++;
/* Clear all error interrupts by writing '1' back to
* all the asserted bits in INT_STATUS. Do not write
* '1' back to 'receive packet' bit, since that is
* managed separately.
*/
int_status &= ~MLXBF_GIGE_INT_STATUS_RX_RECEIVE_PACKET;
writeq(int_status, priv->base + MLXBF_GIGE_INT_STATUS);
return IRQ_HANDLED;
}
static irqreturn_t mlxbf_gige_rx_intr(int irq, void *dev_id)
{
struct mlxbf_gige *priv;
priv = dev_id;
/* NOTE: GigE silicon automatically disables "packet rx" interrupt by
* setting MLXBF_GIGE_INT_MASK bit0 upon triggering the interrupt
* to the ARM cores. Software needs to re-enable "packet rx"
* interrupts by clearing MLXBF_GIGE_INT_MASK bit0.
*/
napi_schedule(&priv->napi);
return IRQ_HANDLED;
}
static irqreturn_t mlxbf_gige_llu_plu_intr(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
int mlxbf_gige_request_irqs(struct mlxbf_gige *priv)
{
int err;
err = request_irq(priv->error_irq, mlxbf_gige_error_intr, 0,
"mlxbf_gige_error", priv);
if (err) {
dev_err(priv->dev, "Request error_irq failure\n");
return err;
}
err = request_irq(priv->rx_irq, mlxbf_gige_rx_intr, 0,
"mlxbf_gige_rx", priv);
if (err) {
dev_err(priv->dev, "Request rx_irq failure\n");
goto free_error_irq;
}
err = request_irq(priv->llu_plu_irq, mlxbf_gige_llu_plu_intr, 0,
"mlxbf_gige_llu_plu", priv);
if (err) {
dev_err(priv->dev, "Request llu_plu_irq failure\n");
goto free_rx_irq;
}
return 0;
free_rx_irq:
free_irq(priv->rx_irq, priv);
free_error_irq:
free_irq(priv->error_irq, priv);
return err;
}
void mlxbf_gige_free_irqs(struct mlxbf_gige *priv)
{
free_irq(priv->error_irq, priv);
free_irq(priv->rx_irq, priv);
free_irq(priv->llu_plu_irq, priv);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c
|
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>
#include "mlx4.h"
#include "fw.h"
enum {
MLX4_IRQNAME_SIZE = 32
};
enum {
MLX4_NUM_ASYNC_EQE = 0x100,
MLX4_NUM_SPARE_EQE = 0x80,
MLX4_EQ_ENTRY_SIZE = 0x20
};
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
#define MLX4_EQ_OWNER_HW ( 1 << 24)
#define MLX4_EQ_FLAG_EC ( 1 << 18)
#define MLX4_EQ_FLAG_OI ( 1 << 17)
#define MLX4_EQ_STATE_ARMED ( 9 << 8)
#define MLX4_EQ_STATE_FIRED (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
(1ull << MLX4_EVENT_TYPE_COMM_EST) | \
(1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
(1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD) | \
(1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
(1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);
return async_ev_mask;
}
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
req_not << 31),
eq->doorbell);
/* We still want ordering, just not swabbing, so add a barrier */
wmb();
}
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
u8 eqe_size)
{
/* (entry & (eq->nent - 1)) gives us a cyclic array */
unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
/* CX3 is capable of extending the EQE from 32 to 64 bytes with
* strides of 64B,128B and 256B.
* When 64B EQE is used, the first (in the lower addresses)
* 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
* contain the legacy EQE information.
* In all other cases, the first 32B contains the legacy EQE info.
*/
return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
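/* The EQE is owned by software only when its owner bit matches the wrap
 * phase of the consumer index; otherwise hardware still owns it and NULL
 * is returned.
 */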
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
struct mlx4_eqe *eqe =
&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
return (!!(eqe->owner & 0x80) ^
!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
eqe : NULL;
}
void mlx4_gen_slave_eqe(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
container_of(work, struct mlx4_mfunc_master_ctx,
slave_event_work);
struct mlx4_mfunc *mfunc =
container_of(master, struct mlx4_mfunc, master);
struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
struct mlx4_eqe *eqe;
u8 slave;
int i, phys_port, slave_port;
for (eqe = next_slave_event_eqe(slave_eq); eqe;
eqe = next_slave_event_eqe(slave_eq)) {
slave = eqe->slave_id;
if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
mlx4_is_bonded(dev)) {
struct mlx4_port_cap port_cap;
if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
goto consume;
if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
goto consume;
}
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i <= dev->persist->num_vfs; i++) {
phys_port = 0;
if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
phys_port = eqe->event.port_mgmt_change.port;
slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
if (slave_port < 0) /* VF doesn't have this port */
continue;
eqe->event.port_mgmt_change.port = slave_port;
}
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
if (phys_port)
eqe->event.port_mgmt_change.port = phys_port;
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
slave);
}
consume:
++slave_eq->cons;
}
}
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
struct mlx4_eqe *s_eqe;
unsigned long flags;
spin_lock_irqsave(&slave_eq->event_lock, flags);
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
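/* Copy the event, excluding the final owner byte, which is written only
 * after the barrier below.
 */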
memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
s_eqe->slave_id = slave;
/* ensure all information is written before setting the ownership bit */
dma_wmb();
s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_event_work);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (slave < 0 || slave > dev->persist->num_vfs ||
slave == dev->caps.function ||
!priv->mfunc.master.slave_state[slave].active)
return;
slave_event(dev, slave, eqe);
}
#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
int hint_err;
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
if (!cpumask_available(eq->affinity_mask) ||
cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
if (!s_slave->active)
return 0;
memset(&eqe, 0, sizeof(eqe));
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
/* don't send if we don't have that slave */
if (dev->persist->num_vfs < slave)
return 0;
memset(&eqe, 0, sizeof(eqe));
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
u8 port_subtype_change)
{
struct mlx4_eqe eqe;
u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
/* don't send if we don't have that slave */
if (dev->persist->num_vfs < slave)
return 0;
memset(&eqe, 0, sizeof(eqe));
eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
eqe.subtype = port_subtype_change;
eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
port_subtype_change, slave, port);
return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return SLAVE_PORT_DOWN;
}
return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);
static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
enum slave_port_state state)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return -1;
}
s_state[slave].port_state[port] = state;
return 0;
}
static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
int i;
enum slave_port_gen_event gen_event;
struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
port);
for (i = 0; i < dev->persist->num_vfs + 1; i++)
if (test_bit(i, slaves_pport.slaves))
set_and_calc_slave_port_state(dev, i, port,
event, &gen_event);
}
/**************************************************************************
The function gets as input the new event for that port,
and changes the slave's port state according to the previous state.
The events are:
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
MLX4_PORT_STATE_DEV_EVENT_PORT_UP
MLX4_PORT_STATE_IB_EVENT_GID_VALID
MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
u8 port, int event,
enum slave_port_gen_event *gen_event)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *ctx = NULL;
unsigned long flags;
int ret = -1;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
enum slave_port_state cur_state =
mlx4_get_slave_port_state(dev, slave, port);
*gen_event = SLAVE_PORT_GEN_EVENT_NONE;
if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
pr_err("%s: Error: asking for slave:%d, port:%d\n",
__func__, slave, port);
return ret;
}
ctx = &priv->mfunc.master.slave_state[slave];
spin_lock_irqsave(&ctx->lock, flags);
switch (cur_state) {
case SLAVE_PORT_DOWN:
if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
mlx4_set_slave_port_state(dev, slave, port,
SLAVE_PENDING_UP);
break;
case SLAVE_PENDING_UP:
if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
mlx4_set_slave_port_state(dev, slave, port,
SLAVE_PORT_DOWN);
else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
mlx4_set_slave_port_state(dev, slave, port,
SLAVE_PORT_UP);
*gen_event = SLAVE_PORT_GEN_EVENT_UP;
}
break;
case SLAVE_PORT_UP:
if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
mlx4_set_slave_port_state(dev, slave, port,
SLAVE_PORT_DOWN);
*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
event) {
mlx4_set_slave_port_state(dev, slave, port,
SLAVE_PENDING_UP);
*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
}
break;
default:
pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
__func__, slave, port);
goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
out:
spin_unlock_irqrestore(&ctx->lock, flags);
return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
struct mlx4_eqe eqe;
memset(&eqe, 0, sizeof(eqe));
eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
eqe.event.port_mgmt_change.port = port;
eqe.event.port_mgmt_change.params.port_info.changed_attr =
cpu_to_be32((u32) attr);
slave_event(dev, ALL_SLAVES, &eqe);
return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
container_of(work, struct mlx4_mfunc_master_ctx,
slave_flr_event_work);
struct mlx4_mfunc *mfunc =
container_of(master, struct mlx4_mfunc, master);
struct mlx4_priv *priv =
container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int i;
int err;
unsigned long flags;
mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
i);
/* In the 'Reset flow' case an FLR can be generated for
 * a slave before mlx4_load_one is done.
 * Make sure the interface is up before trying to delete
 * slave resources that weren't allocated yet.
 */
if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_UP)
mlx4_delete_all_resources_for_slave(dev, i);
/* return the slave to running mode */
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
slave_state[i].is_slave_going_down = 0;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/* notify the FW */
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
i);
}
}
}
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
int slave = 0;
int ret;
int flr_slave;
u8 update_slave_state;
int i;
enum slave_port_gen_event gen_event;
unsigned long flags;
struct mlx4_vport_state *s_info;
int eqe_size = dev->caps.eqe_size;
while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
switch (eqe->type) {
case MLX4_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
mlx4_cq_completion(dev, cqn);
break;
case MLX4_EVENT_TYPE_PATH_MIG:
case MLX4_EVENT_TYPE_COMM_EST:
case MLX4_EVENT_TYPE_SQ_DRAINED:
case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
mlx4_dbg(dev, "event %d arrived\n", eqe->type);
if (mlx4_is_master(dev)) {
/* forward only to slave owning the QP */
ret = mlx4_get_slave_from_resource_id(dev,
RES_QP,
be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
if (!ret && slave != dev->caps.function) {
mlx4_slave_event(dev, slave, eqe);
break;
}
}
mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
fallthrough;
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
ret = mlx4_get_slave_from_resource_id(dev,
RES_SRQ,
be32_to_cpu(eqe->event.srq.srqn)
& 0xffffff,
&slave);
if (ret && ret != -ENOENT) {
mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
}
mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
mlx4_cmd_event(dev,
be16_to_cpu(eqe->event.cmd.token),
eqe->event.cmd.status,
be64_to_cpu(eqe->event.cmd.out_param));
break;
case MLX4_EVENT_TYPE_PORT_CHANGE: {
struct mlx4_slaves_pport slaves_port;
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
slaves_port = mlx4_phys_to_slaves_pport(dev, port);
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
mlx4_dispatch_event(
dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
if (!mlx4_is_master(dev))
break;
for (i = 0; i < dev->persist->num_vfs + 1;
i++) {
int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
continue;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
| (reported_port << 28));
mlx4_slave_event(dev, i, eqe);
}
} else { /* IB port */
set_and_calc_slave_port_state(dev, i, port,
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
&gen_event);
/* we may be in a pending state; if so, do not send a port_down event */
if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
if (i == mlx4_master_func_num(dev))
continue;
eqe->event.port_change.port =
cpu_to_be32(
(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
| (mlx4_phys_to_slave_port(dev, i, port) << 28));
mlx4_slave_event(dev, i, eqe);
}
}
}
} else {
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
&port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
if (!mlx4_is_master(dev))
break;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
for (i = 0;
i < dev->persist->num_vfs + 1;
i++) {
int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);
if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
continue;
if (i == mlx4_master_func_num(dev))
continue;
s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
eqe->event.port_change.port =
cpu_to_be32(
(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
| (reported_port << 28));
mlx4_slave_event(dev, i, eqe);
}
}
else /* IB port */
/* port-up event will be sent to a slave when the
* slave's alias-guid is set. This is done in alias_GUID.c
*/
set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
}
break;
}
case MLX4_EVENT_TYPE_CQ_ERROR:
mlx4_warn(dev, "CQ %s on CQN %06x\n",
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
if (mlx4_is_master(dev)) {
ret = mlx4_get_slave_from_resource_id(dev,
RES_CQ,
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
if (!ret && slave != dev->caps.function) {
mlx4_slave_event(dev, slave, eqe);
break;
}
}
mlx4_cq_event(dev,
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff,
eqe->type);
break;
case MLX4_EVENT_TYPE_EQ_OVERFLOW:
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
case MLX4_EVENT_TYPE_OP_REQUIRED:
atomic_inc(&priv->opreq_count);
/* FW commands can't be executed from interrupt context
* working in deferred task
*/
queue_work(mlx4_wq, &priv->opreq_task);
break;
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Received comm channel event for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
eqe->event.comm_channel_arm.bit_vec,
sizeof(eqe->event.comm_channel_arm.bit_vec));
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.comm_work);
break;
case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Non-master function received FLR event\n");
break;
}
mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
if (flr_slave >= dev->num_slaves) {
mlx4_warn(dev,
"Got FLR for unknown function: %d\n",
flr_slave);
update_slave_state = 0;
} else
update_slave_state = 1;
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (update_slave_state) {
priv->mfunc.master.slave_state[flr_slave].active = false;
priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
&flr_slave);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
case MLX4_EVENT_TYPE_FATAL_WARNING:
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) {
mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
__func__, i);
if (i == dev->caps.function)
continue;
mlx4_slave_event(dev, i, eqe);
}
mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
be16_to_cpu(eqe->event.warming.warning_threshold),
be16_to_cpu(eqe->event.warming.current_temperature));
} else
mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
mlx4_dispatch_event(
dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
break;
case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
switch (eqe->subtype) {
case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
mlx4_warn(dev, "Bad cable detected on port %u\n",
eqe->event.bad_cable.port);
break;
case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
mlx4_warn(dev, "Unsupported cable detected\n");
break;
default:
mlx4_dbg(dev,
"Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
}
break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
!!(eqe->owner & 0x80) ^
!!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
}
++eq->cons_index;
eqes_found = 1;
++set_ci;
/*
* The HCA will think the queue has overflowed if we
* don't tell it we've been processing events. We
* create our EQs with MLX4_NUM_SPARE_EQE extra
* entries, so we must update our consumer index at
* least that often.
*/
if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
eq_set_ci(eq, 0);
set_ci = 0;
}
}
eq_set_ci(eq, 1);
return eqes_found;
}
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
struct mlx4_dev *dev = dev_ptr;
struct mlx4_priv *priv = mlx4_priv(dev);
int work = 0;
int i;
writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
return IRQ_RETVAL(work);
}
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
struct mlx4_eq *eq = eq_ptr;
struct mlx4_dev *dev = eq->dev;
mlx4_eq_int(dev, eq);
/* MSI-X vectors always belong to us */
return IRQ_HANDLED;
}
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq_info *event_eq =
priv->mfunc.master.slave_state[slave].event_eq;
u32 in_modifier = vhcr->in_modifier;
u32 eqn = in_modifier & 0x3FF;
u64 in_param = vhcr->in_param;
int err = 0;
int i;
if (slave == dev->caps.function)
err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
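/* Record which EQ handles each requested event type for this slave;
 * bit 31 of in_modifier marks an unmap request.
 */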
if (!err)
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
if (in_param & (1LL << i))
event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
return err;
}
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
/*
* Each UAR holds 4 EQ doorbells. To figure out how many UARs
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int index;
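/* Each UAR page holds the doorbells for four EQs; map the page on first
 * use and return this EQ's doorbell within it.
 */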
index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
if (!priv->eq_table.uar_map[index]) {
priv->eq_table.uar_map[index] =
ioremap(
pci_resource_start(dev->persist->pdev, 2) +
((eq->eqn / 4) << (dev->uar_page_shift)),
(1 << (dev->uar_page_shift)));
if (!priv->eq_table.uar_map[index]) {
mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
eq->eqn);
return NULL;
}
}
return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i]) {
iounmap(priv->eq_table.uar_map[i]);
priv->eq_table.uar_map[i] = NULL;
}
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
u8 intr, struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_eq_context *eq_context;
int npages;
u64 *dma_list = NULL;
dma_addr_t t;
u64 mtt_addr;
int err = -ENOMEM;
int i;
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
* strides of 64B,128B and 256B.
*/
npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;
eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
GFP_KERNEL);
if (!eq->page_list)
goto err_out;
for (i = 0; i < npages; ++i)
eq->page_list[i].buf = NULL;
dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
if (!dma_list)
goto err_out_free;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
goto err_out_free;
eq_context = mailbox->buf;
for (i = 0; i < npages; ++i) {
eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
pdev->dev,
PAGE_SIZE, &t,
GFP_KERNEL);
if (!eq->page_list[i].buf)
goto err_out_free_pages;
dma_list[i] = t;
eq->page_list[i].map = t;
}
eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
if (eq->eqn == -1)
goto err_out_free_pages;
eq->doorbell = mlx4_get_eq_uar(dev, eq);
if (!eq->doorbell) {
err = -ENOMEM;
goto err_out_free_eq;
}
err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
if (err)
goto err_out_free_eq;
err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
if (err)
goto err_out_free_mtt;
eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
MLX4_EQ_STATE_ARMED);
eq_context->log_eq_size = ilog2(eq->nent);
eq_context->intr = intr;
eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
eq_context->mtt_base_addr_h = mtt_addr >> 32;
eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
if (err) {
mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
goto err_out_free_mtt;
}
kfree(dma_list);
mlx4_free_cmd_mailbox(dev, mailbox);
eq->cons_index = 0;
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);
return err;
err_out_free_mtt:
mlx4_mtt_cleanup(dev, &eq->mtt);
err_out_free_eq:
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
err_out_free_pages:
for (i = 0; i < npages; ++i)
if (eq->page_list[i].buf)
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
mlx4_free_cmd_mailbox(dev, mailbox);
err_out_free:
kfree(eq->page_list);
kfree(dma_list);
err_out:
return err;
}
static void mlx4_free_eq(struct mlx4_dev *dev,
struct mlx4_eq *eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int i;
/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
* strides of 64B,128B and 256B
*/
int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
err = mlx4_HW2SW_EQ(dev, eq->eqn);
if (err)
mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
synchronize_irq(eq->irq);
tasklet_disable(&eq->tasklet_ctx.task);
mlx4_mtt_cleanup(dev, &eq->mtt);
for (i = 0; i < npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
eq->page_list[i].buf,
eq->page_list[i].map);
kfree(eq->page_list);
mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
kfree(eq_table->irq_names);
}
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM;
}
return 0;
}
static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
iounmap(priv->clr_base);
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
sizeof(*priv->eq_table.eq), GFP_KERNEL);
if (!priv->eq_table.eq)
return -ENOMEM;
return 0;
}
void mlx4_free_eq_table(struct mlx4_dev *dev)
{
kfree(mlx4_priv(dev)->eq_table.eq);
}
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int i;
priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
sizeof(*priv->eq_table.uar_map),
GFP_KERNEL);
if (!priv->eq_table.uar_map) {
err = -ENOMEM;
goto err_out_free;
}
err = mlx4_bitmap_init(&priv->eq_table.bitmap,
roundup_pow_of_two(dev->caps.num_eqs),
dev->caps.num_eqs - 1,
dev->caps.reserved_eqs,
roundup_pow_of_two(dev->caps.num_eqs) -
dev->caps.num_eqs);
if (err)
goto err_out_free;
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
if (!mlx4_is_slave(dev)) {
err = mlx4_map_clr_int(dev);
if (err)
goto err_out_bitmap;
priv->eq_table.clr_mask =
swab32(1 << (priv->eq_table.inta_pin & 31));
priv->eq_table.clr_int = priv->clr_base +
(priv->eq_table.inta_pin < 32 ? 4 : 0);
}
priv->eq_table.irq_names =
kmalloc_array(MLX4_IRQNAME_SIZE,
(dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
goto err_out_clr_int;
}
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
if (i == MLX4_EQ_ASYNC) {
err = mlx4_create_eq(dev,
MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
} else {
struct mlx4_eq *eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
int port = find_first_bit(eq->actv_ports.ports,
dev->caps.num_ports) + 1;
if (port <= dev->caps.num_ports) {
struct mlx4_port_info *info =
&mlx4_priv(dev)->port[port];
if (!info->rmap) {
info->rmap = alloc_irq_cpu_rmap(
mlx4_get_eqs_per_port(dev, port));
if (!info->rmap) {
mlx4_warn(dev, "Failed to allocate cpu rmap\n");
err = -ENOMEM;
goto err_out_unmap;
}
}
err = irq_cpu_rmap_add(
info->rmap, eq->irq);
if (err)
mlx4_warn(dev, "Failed adding irq rmap\n");
}
#endif
err = mlx4_create_eq(dev, dev->quotas.cq +
MLX4_NUM_SPARE_EQE,
(dev->flags & MLX4_FLAG_MSI_X) ?
i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
eq);
}
if (err)
goto err_out_unmap;
}
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
snprintf(priv->eq_table.irq_names +
MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE,
"mlx4-async@pci:%s",
pci_name(dev->persist->pdev));
eq_name = priv->eq_table.irq_names +
MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
mlx4_msi_x_interrupt, 0, eq_name,
priv->eq_table.eq + MLX4_EQ_ASYNC);
if (err)
goto err_out_unmap;
priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
DRV_NAME "@pci:%s",
pci_name(dev->persist->pdev));
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
/* arm ASYNC eq */
eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
err_out_unmap:
while (i > 0)
mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
mlx4_priv(dev)->port[i].rmap = NULL;
}
}
#endif
mlx4_free_irqs(dev);
err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
err_out_free:
kfree(priv->eq_table.uar_map);
return err;
}
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
mlx4_priv(dev)->port[i].rmap = NULL;
}
}
#endif
mlx4_free_irqs(dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
mlx4_unmap_uar(dev);
mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
kfree(priv->eq_table.uar_map);
}
/* A test that verifies that we can accept interrupts
* on the vector allocated for asynchronous events
*/
int mlx4_test_async(struct mlx4_dev *dev)
{
return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);
/* A test that verifies that we can accept interrupts
* on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
/* Temporarily use polling for command completions */
mlx4_cmd_use_polling(dev);
/* Map the new eq to handle all asynchronous events */
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
if (err) {
mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
goto out;
}
/* Go back to using events */
mlx4_cmd_use_events(dev);
err = mlx4_NOP(dev);
/* Return to default */
mlx4_cmd_use_polling(dev);
out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
mlx4_cmd_use_events(dev);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
vector = MLX4_CQ_TO_EQ_VECTOR(vector);
if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
(vector == MLX4_EQ_ASYNC))
return false;
return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned int i;
unsigned int sum = 0;
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
sum += !!test_bit(port - 1,
priv->eq_table.eq[i].actv_ports.ports);
return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);
int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
vector = MLX4_CQ_TO_EQ_VECTOR(vector);
if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
return -EINVAL;
return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);
struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);
int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err = 0, i = 0;
u32 min_ref_count_val = (u32)-1;
int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
int *prequested_vector = NULL;
mutex_lock(&priv->msix_ctl.pool_lock);
if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
(requested_vector >= 0) &&
(requested_vector != MLX4_EQ_ASYNC)) {
if (test_bit(port - 1,
priv->eq_table.eq[requested_vector].actv_ports.ports)) {
prequested_vector = &requested_vector;
} else {
struct mlx4_eq *eq;
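/* Treat the requested vector as port-relative: skip over the
 * vectors serving lower-numbered ports to find the absolute vector.
 */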
for (i = 1; i < port;
requested_vector += mlx4_get_eqs_per_port(dev, i++))
;
eq = &priv->eq_table.eq[requested_vector];
if (requested_vector < dev->caps.num_comp_vectors + 1 &&
test_bit(port - 1, eq->actv_ports.ports)) {
prequested_vector = &requested_vector;
}
}
}
if (!prequested_vector) {
requested_vector = -1;
for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
i++) {
struct mlx4_eq *eq = &priv->eq_table.eq[i];
if (min_ref_count_val > eq->ref_count &&
test_bit(port - 1, eq->actv_ports.ports)) {
min_ref_count_val = eq->ref_count;
requested_vector = i;
}
}
if (requested_vector < 0) {
err = -ENOSPC;
goto err_unlock;
}
prequested_vector = &requested_vector;
}
if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
dev->flags & MLX4_FLAG_MSI_X) {
set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
snprintf(priv->eq_table.irq_names +
*prequested_vector * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
*prequested_vector, dev_name(&dev->persist->pdev->dev));
err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
mlx4_msi_x_interrupt, 0,
&priv->eq_table.irq_names[*prequested_vector << 5],
priv->eq_table.eq + *prequested_vector);
if (err) {
clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
*prequested_vector = -1;
} else {
#if defined(CONFIG_SMP)
mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
if (!err && *prequested_vector >= 0)
priv->eq_table.eq[*prequested_vector].ref_count++;
err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
if (!err && *prequested_vector >= 0)
*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
else
*vector = 0;
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
mutex_lock(&priv->msix_ctl.pool_lock);
priv->eq_table.eq[eq_vec].ref_count--;
/* Once an EQ has been allocated we don't release it, because it might be
 * bound to cpu_rmap.
 */
mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/eq.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
* All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/export.h>
#include "fw_qos.h"
#include "fw.h"
enum {
/* allocate vpp opcode modifiers */
MLX4_ALLOCATE_VPP_ALLOCATE = 0x0,
MLX4_ALLOCATE_VPP_QUERY = 0x1
};
enum {
/* set vport qos opcode modifiers */
MLX4_SET_VPORT_QOS_SET = 0x0,
MLX4_SET_VPORT_QOS_QUERY = 0x1
};
struct mlx4_set_port_prio2tc_context {
u8 prio2tc[4];
};
struct mlx4_port_scheduler_tc_cfg_be {
__be16 pg;
__be16 bw_precentage;
__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
__be16 max_bw_value;
};
struct mlx4_set_port_scheduler_context {
struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
};
/* Granular Qos (per VF) section */
struct mlx4_alloc_vpp_param {
__be32 available_vpp;
__be32 vpp_p_up[MLX4_NUM_UP];
};
struct mlx4_prio_qos_param {
__be32 bw_share;
__be32 max_avg_bw;
__be32 reserved;
__be32 enable;
__be32 reserved1[4];
};
struct mlx4_set_vport_context {
__be32 reserved[8];
struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
};
int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_prio2tc_context *context;
int err;
u32 in_mod;
int i;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
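/* Pack two 4-bit priority-to-TC mappings into each byte */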
for (i = 0; i < MLX4_NUM_UP; i += 2)
context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
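/* Illustrative sketch (not part of the upstream file): mlx4_SET_PORT_PRIO2TC()
 * packs two 4-bit traffic-class values into each byte of prio2tc[], high
 * nibble first, so the user priorities fit in four bytes.  A minimal
 * stand-alone rendering of that packing, assuming MLX4_NUM_UP == 8:
 */
static inline void example_pack_prio2tc(const u8 up2tc[8], u8 packed[4])
{
	int i;

	/* e.g. up2tc = {0, 0, 1, 1, 2, 2, 3, 3} packs to {0x00, 0x11, 0x22, 0x33} */
	for (i = 0; i < 8; i += 2)
		packed[i >> 1] = up2tc[i] << 4 | up2tc[i + 1];
}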
int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
u8 *pg, u16 *ratelimit)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_scheduler_context *context;
int err;
u32 in_mod;
int i;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
for (i = 0; i < MLX4_NUM_TC; i++) {
struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
u16 r;
if (ratelimit && ratelimit[i]) {
if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
r = ratelimit[i];
tc->max_bw_units =
htons(MLX4_RATELIMIT_100M_UNITS);
} else {
r = ratelimit[i] / 10;
tc->max_bw_units =
htons(MLX4_RATELIMIT_1G_UNITS);
}
tc->max_bw_value = htons(r);
} else {
tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
}
tc->pg = htons(pg[i]);
tc->bw_precentage = htons(tc_tx_bw[i]);
}
in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
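/* Illustrative note (not part of the upstream file): the rate limit above is
 * programmed either in 100 Mbps units or in 1 Gbps units.  Values up to
 * MLX4_MAX_100M_UNITS_VAL are written as-is with the 100 Mbps unit selector;
 * larger values are divided by 10 and written with the 1 Gbps selector, which
 * keeps the configured bandwidth unchanged (1 Gbps == 10 x 100 Mbps) and
 * suggests the ratelimit argument is expressed in 100 Mbps units.  Assuming
 * MLX4_MAX_100M_UNITS_VAL is 255, a hypothetical worked example:
 *
 *	ratelimit[i] = 100  ->  100 <= 255: value 100, 100 Mbps units (10 Gbps)
 *	ratelimit[i] = 400  ->  400  > 255: value  40,   1 Gbps units (40 Gbps)
 */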
int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
u16 *available_vpp, u8 *vpp_p_up)
{
int i;
int err;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_alloc_vpp_param *out_param;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
out_param = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
MLX4_ALLOCATE_VPP_QUERY,
MLX4_CMD_ALLOCATE_VPP,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto out;
/* Total number of supported VPPs */
*available_vpp = (u16)be32_to_cpu(out_param->available_vpp);
for (i = 0; i < MLX4_NUM_UP; i++)
vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);
int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
{
int i;
int err;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_alloc_vpp_param *in_param;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
in_param = mailbox->buf;
for (i = 0; i < MLX4_NUM_UP; i++)
in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);
err = mlx4_cmd(dev, mailbox->dma, port,
MLX4_ALLOCATE_VPP_ALLOCATE,
MLX4_CMD_ALLOCATE_VPP,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *out_param)
{
int i;
int err;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vport_context *ctx;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ctx = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
MLX4_SET_VPORT_QOS_QUERY,
MLX4_CMD_SET_VPORT_QOS,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto out;
for (i = 0; i < MLX4_NUM_UP; i++) {
out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
out_param[i].max_avg_bw =
be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
out_param[i].enable =
!!(be32_to_cpu(ctx->qos_p_up[i].enable) & 31);
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);
int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
struct mlx4_vport_qos_param *in_param)
{
int i;
int err;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vport_context *ctx;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
ctx = mailbox->buf;
for (i = 0; i < MLX4_NUM_UP; i++) {
ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
ctx->qos_p_up[i].max_avg_bw =
cpu_to_be32(in_param[i].max_avg_bw);
ctx->qos_p_up[i].enable =
cpu_to_be32(in_param[i].enable << 31);
}
err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
MLX4_SET_VPORT_QOS_SET,
MLX4_CMD_SET_VPORT_QOS,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
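/* Illustrative note (not part of the upstream file): in the mailbox layout
 * above, the per-priority enable flag is carried in the most significant bit
 * of the 32-bit "enable" word: a set request with in_param[i].enable == 1 is
 * encoded as cpu_to_be32(1 << 31), i.e. the bytes 0x80 0x00 0x00 0x00 on the
 * wire.
 */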
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/fw_qos.c
|
/*
* Copyright (c) 2018, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "mlx4.h"
#define BAD_ACCESS 0xBADACCE5
#define HEALTH_BUFFER_SIZE 0x40
#define CR_ENABLE_BIT swab32(BIT(6))
#define CR_ENABLE_BIT_OFFSET 0xF3F04
#define MAX_NUM_OF_DUMPS_TO_STORE (8)
#define REGION_CR_SPACE "cr-space"
#define REGION_FW_HEALTH "fw-health"
static const char * const region_cr_space_str = REGION_CR_SPACE;
static const char * const region_fw_health_str = REGION_FW_HEALTH;
static const struct devlink_region_ops region_cr_space_ops = {
.name = REGION_CR_SPACE,
.destructor = &kvfree,
};
static const struct devlink_region_ops region_fw_health_ops = {
.name = REGION_FW_HEALTH,
.destructor = &kvfree,
};
/* Set to true if the CR enable bit was set before the crdump */
static bool crdump_enbale_bit_set;
static void crdump_enable_crspace_access(struct mlx4_dev *dev,
u8 __iomem *cr_space)
{
/* Get current enable bit value */
crdump_enbale_bit_set =
readl(cr_space + CR_ENABLE_BIT_OFFSET) & CR_ENABLE_BIT;
/* Enable FW CR filter (set bit6 to 0) */
if (crdump_enbale_bit_set)
writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) & ~CR_ENABLE_BIT,
cr_space + CR_ENABLE_BIT_OFFSET);
/* Enable block volatile crspace accesses */
writel(swab32(1), cr_space + dev->caps.health_buffer_addrs +
HEALTH_BUFFER_SIZE);
}
static void crdump_disable_crspace_access(struct mlx4_dev *dev,
u8 __iomem *cr_space)
{
/* Disable block volatile crspace accesses */
writel(0, cr_space + dev->caps.health_buffer_addrs +
HEALTH_BUFFER_SIZE);
/* Restore FW CR filter value (set bit6 to original value) */
if (crdump_enbale_bit_set)
writel(readl(cr_space + CR_ENABLE_BIT_OFFSET) | CR_ENABLE_BIT,
cr_space + CR_ENABLE_BIT_OFFSET);
}
static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
u8 __iomem *cr_space,
u32 id)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
struct pci_dev *pdev = dev->persist->pdev;
unsigned long cr_res_size;
u8 *crspace_data;
int offset;
int err;
if (!crdump->region_crspace) {
mlx4_err(dev, "crdump: cr-space region is NULL\n");
return;
}
/* Try to collect CR space */
cr_res_size = pci_resource_len(pdev, 0);
crspace_data = kvmalloc(cr_res_size, GFP_KERNEL);
if (crspace_data) {
for (offset = 0; offset < cr_res_size; offset += 4)
*(u32 *)(crspace_data + offset) =
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
crspace_data, id);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
region_cr_space_str, id, err);
} else {
mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
id, region_cr_space_str);
}
} else {
mlx4_err(dev, "crdump: Failed to allocate crspace buffer\n");
}
}
static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
u8 __iomem *cr_space,
u32 id)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
u8 *health_data;
int offset;
int err;
if (!crdump->region_fw_health) {
mlx4_err(dev, "crdump: fw-health region is NULL\n");
return;
}
/* Try to collect health buffer */
health_data = kvmalloc(HEALTH_BUFFER_SIZE, GFP_KERNEL);
if (health_data) {
u8 __iomem *health_buf_start =
cr_space + dev->caps.health_buffer_addrs;
for (offset = 0; offset < HEALTH_BUFFER_SIZE; offset += 4)
*(u32 *)(health_data + offset) =
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
health_data, id);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
region_fw_health_str, id, err);
} else {
mlx4_info(dev, "crdump: added snapshot %d to devlink region %s\n",
id, region_fw_health_str);
}
} else {
mlx4_err(dev, "crdump: Failed to allocate health buffer\n");
}
}
int mlx4_crdump_collect(struct mlx4_dev *dev)
{
struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
struct pci_dev *pdev = dev->persist->pdev;
unsigned long cr_res_size;
u8 __iomem *cr_space;
int err;
u32 id;
if (!dev->caps.health_buffer_addrs) {
mlx4_info(dev, "crdump: FW doesn't support health buffer access, skipping\n");
return 0;
}
if (!crdump->snapshot_enable) {
mlx4_info(dev, "crdump: devlink snapshot disabled, skipping\n");
return 0;
}
cr_res_size = pci_resource_len(pdev, 0);
cr_space = ioremap(pci_resource_start(pdev, 0), cr_res_size);
if (!cr_space) {
mlx4_err(dev, "crdump: Failed to map pci cr region\n");
return -ENODEV;
}
/* Get the available snapshot ID for the dumps */
err = devlink_region_snapshot_id_get(devlink, &id);
if (err) {
mlx4_err(dev, "crdump: devlink get snapshot id err %d\n", err);
iounmap(cr_space);
return err;
}
crdump_enable_crspace_access(dev, cr_space);
/* Try to capture dumps */
mlx4_crdump_collect_crspace(dev, cr_space, id);
mlx4_crdump_collect_fw_health(dev, cr_space, id);
/* Release reference on the snapshot id */
devlink_region_snapshot_id_put(devlink, id);
crdump_disable_crspace_access(dev, cr_space);
iounmap(cr_space);
return 0;
}
int mlx4_crdump_init(struct mlx4_dev *dev)
{
struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
struct pci_dev *pdev = dev->persist->pdev;
crdump->snapshot_enable = false;
/* Create cr-space region */
crdump->region_crspace =
devl_region_create(devlink,
®ion_cr_space_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
PTR_ERR(crdump->region_crspace));
/* Create fw-health region */
crdump->region_fw_health =
devl_region_create(devlink,
®ion_fw_health_ops,
MAX_NUM_OF_DUMPS_TO_STORE,
HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
PTR_ERR(crdump->region_fw_health));
return 0;
}
void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
devl_region_destroy(crdump->region_fw_health);
devl_region_destroy(crdump->region_crspace);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/crdump.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
static const char mlx4_en_version[] =
DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
DRV_VERSION "\n";
#define MLX4_EN_PARM_INT(X, def_val, desc) \
static unsigned int X = def_val;\
module_param(X , uint, 0444); \
MODULE_PARM_DESC(X, desc);
/*
* Device scope module parameters
*/
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
		 "Enable RSS for incoming UDP traffic (1, default) or disable it (0)");
/* Priority pausing */
MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
" Per priority bit mask");
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
"Threshold for using inline data (range: 17-104, default: 104)");
#define MAX_PFC_TX 0xff
#define MAX_PFC_RX 0xff
void en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...)
{
va_list args;
struct va_format vaf;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
if (priv->registered)
printk("%s%s: %s: %pV",
level, DRV_NAME, priv->dev->name, &vaf);
else
printk("%s%s: %s: Port %d: %pV",
level, DRV_NAME, dev_name(&priv->mdev->pdev->dev),
priv->port, &vaf);
va_end(args);
}
void mlx4_en_update_loopback_state(struct net_device *dev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (features & NETIF_F_LOOPBACK)
priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
else
priv->ctrl_flags &= cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
	/* Filter out our own packets on RX when running in SRIOV
	 * (multi-function) mode, unless HW loopback is enabled or the
	 * loopback selftest is in progress.
	 */
if (mlx4_is_mfunc(priv->mdev->dev) &&
!(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
/* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
* is requested
*/
if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
mutex_lock(&priv->mdev->state_lock);
if ((priv->mdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB) &&
priv->rss_map.indir_qp && priv->rss_map.indir_qp->qpn) {
int i;
int err = 0;
int loopback = !!(features & NETIF_F_LOOPBACK);
for (i = 0; i < priv->rx_ring_num; i++) {
int ret;
ret = mlx4_en_change_mcast_lb(priv,
&priv->rss_map.qps[i],
loopback);
if (!err)
err = ret;
}
if (err)
mlx4_warn(priv->mdev, "failed to change mcast loopback\n");
}
mutex_unlock(&priv->mdev->state_lock);
}
static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
int i;
params->udp_rss = udp_rss;
params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
MLX4_EN_MIN_TX_RING_P_UP :
min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
params->prof[i].rx_pause = !(pfcrx || pfctx);
params->prof[i].rx_ppp = pfcrx;
params->prof[i].tx_pause = !(pfcrx || pfctx);
params->prof[i].tx_ppp = pfctx;
if (mlx4_low_memory_profile()) {
params->prof[i].tx_ring_size = MLX4_EN_MIN_TX_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_MIN_RX_SIZE;
} else {
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
}
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
}
}
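/* Illustrative note (not part of the upstream file): with the defaults above,
 * global pause (rx_pause/tx_pause) is enabled on every port and is turned off
 * automatically as soon as any per-priority flow-control bit is set via the
 * pfcrx/pfctx module parameters; the driver treats global pause and PFC as
 * mutually exclusive per port (see mlx4_en_set_pauseparam()).
 */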
static int mlx4_en_event(struct notifier_block *this, unsigned long event,
void *param)
{
struct mlx4_en_dev *mdev =
container_of(this, struct mlx4_en_dev, mlx_nb);
struct mlx4_dev *dev = mdev->dev;
struct mlx4_en_priv *priv;
int port;
switch (event) {
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
case MLX4_DEV_EVENT_SLAVE_INIT:
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
break;
default:
port = *(int *)param;
break;
}
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
if (!mdev->pndev[port])
return NOTIFY_DONE;
priv = netdev_priv(mdev->pndev[port]);
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
priv->link_state = event;
queue_work(mdev->workqueue, &priv->linkstate_task);
break;
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
mlx4_err(mdev, "Internal error detected, restarting device\n");
break;
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
case MLX4_DEV_EVENT_SLAVE_INIT:
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
break;
default:
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
return NOTIFY_DONE;
mlx4_warn(mdev, "Unhandled event %d for port %d\n", (int)event,
port);
}
return NOTIFY_DONE;
}
static void mlx4_en_remove(struct auxiliary_device *adev)
{
struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
struct mlx4_dev *dev = madev->mdev;
struct mlx4_en_dev *mdev = auxiliary_get_drvdata(adev);
int i;
mlx4_unregister_event_notifier(dev, &mdev->mlx_nb);
mutex_lock(&mdev->state_lock);
mdev->device_up = false;
mutex_unlock(&mdev->state_lock);
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
mlx4_uar_free(dev, &mdev->priv_uar);
mlx4_pd_free(dev, mdev->priv_pdn);
if (mdev->netdev_nb.notifier_call)
unregister_netdevice_notifier(&mdev->netdev_nb);
kfree(mdev);
}
static int mlx4_en_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev);
struct mlx4_dev *dev = madev->mdev;
struct mlx4_en_dev *mdev;
int err, i;
printk_once(KERN_INFO "%s", mlx4_en_version);
mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
err = -ENOMEM;
goto err_free_res;
}
err = mlx4_pd_alloc(dev, &mdev->priv_pdn);
if (err)
goto err_free_dev;
err = mlx4_uar_alloc(dev, &mdev->priv_uar);
if (err)
goto err_pd;
mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
PAGE_SIZE);
if (!mdev->uar_map) {
err = -ENOMEM;
goto err_uar;
}
spin_lock_init(&mdev->uar_lock);
mdev->dev = dev;
mdev->dma_device = &dev->persist->pdev->dev;
mdev->pdev = dev->persist->pdev;
mdev->device_up = false;
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
		mlx4_warn(mdev, "LSO not supported, please upgrade to a later FW version to enable LSO\n");
err = mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ, 0, 0,
&mdev->mr);
if (err) {
mlx4_err(mdev, "Failed allocating memory region\n");
goto err_map;
}
err = mlx4_mr_enable(mdev->dev, &mdev->mr);
if (err) {
mlx4_err(mdev, "Failed enabling memory region\n");
goto err_mr;
}
/* Build device profile according to supplied module parameters */
mlx4_en_get_profile(mdev);
/* Configure which ports to start according to module parameters */
mdev->port_cnt = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
/* Set default number of RX rings*/
mlx4_en_set_num_rx_rings(mdev);
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
* by the rtnl lock */
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
goto err_mr;
}
/* At this stage all non-port specific tasks are complete:
* mark the card state as up */
mutex_init(&mdev->state_lock);
mdev->device_up = true;
/* register mlx4 core notifier */
mdev->mlx_nb.notifier_call = mlx4_en_event;
err = mlx4_register_event_notifier(dev, &mdev->mlx_nb);
WARN(err, "failed to register mlx4 event notifier (%d)", err);
/* Setup ports */
/* Create a netdev for each port */
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
mlx4_info(mdev, "Activating port:%d\n", i);
if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
mdev->pndev[i] = NULL;
}
/* register netdev notifier */
mdev->netdev_nb.notifier_call = mlx4_en_netdev_event;
if (register_netdevice_notifier(&mdev->netdev_nb)) {
mdev->netdev_nb.notifier_call = NULL;
mlx4_err(mdev, "Failed to create netdev notifier\n");
}
auxiliary_set_drvdata(adev, mdev);
return 0;
err_mr:
(void) mlx4_mr_free(dev, &mdev->mr);
err_map:
if (mdev->uar_map)
iounmap(mdev->uar_map);
err_uar:
mlx4_uar_free(dev, &mdev->priv_uar);
err_pd:
mlx4_pd_free(dev, mdev->priv_pdn);
err_free_dev:
kfree(mdev);
err_free_res:
return err;
}
static const struct auxiliary_device_id mlx4_en_id_table[] = {
{ .name = MLX4_ADEV_NAME ".eth" },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mlx4_en_id_table);
static struct mlx4_adrv mlx4_en_adrv = {
.adrv = {
.name = "eth",
.probe = mlx4_en_probe,
.remove = mlx4_en_remove,
.id_table = mlx4_en_id_table,
},
.protocol = MLX4_PROT_ETH,
};
static void mlx4_en_verify_params(void)
{
if (pfctx > MAX_PFC_TX) {
pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
pfctx, MAX_PFC_TX);
pfctx = 0;
}
if (pfcrx > MAX_PFC_RX) {
pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
pfcrx, MAX_PFC_RX);
pfcrx = 0;
}
if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
inline_thold = MAX_INLINE;
}
}
static int __init mlx4_en_init(void)
{
mlx4_en_verify_params();
mlx4_en_init_ptys2ethtool_map();
return mlx4_register_auxiliary_driver(&mlx4_en_adrv);
}
static void __exit mlx4_en_cleanup(void)
{
mlx4_unregister_auxiliary_driver(&mlx4_en_adrv);
}
module_init(mlx4_en_init);
module_exit(mlx4_en_cleanup);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_main.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/bitmap.h>
#include <linux/mii.h>
#include "mlx4_en.h"
#include "en_port.h"
#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
{
int i, t;
int err = 0;
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
priv->tx_cq[t][i]->moder_cnt = priv->tx_frames;
priv->tx_cq[t][i]->moder_time = priv->tx_usecs;
if (priv->port_up) {
err = mlx4_en_set_cq_moder(priv,
priv->tx_cq[t][i]);
if (err)
return err;
}
}
}
if (priv->adaptive_rx_coal)
return 0;
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_cq[i]->moder_cnt = priv->rx_frames;
priv->rx_cq[i]->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
if (priv->port_up) {
err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
if (err)
return err;
}
}
return err;
}
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strscpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
strscpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
}
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
"blueflame",
"phv-bit"
};
static const char main_strings[][ETH_GSTRING_LEN] = {
/* main statistics */
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* port statistics */
"tso_packets",
"xmit_more",
"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_pages",
"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",
/* pf statistics */
"pf_rx_packets",
"pf_rx_bytes",
"pf_tx_packets",
"pf_tx_bytes",
/* priority flow control statistics rx */
"rx_pause_prio_0", "rx_pause_duration_prio_0",
"rx_pause_transition_prio_0",
"rx_pause_prio_1", "rx_pause_duration_prio_1",
"rx_pause_transition_prio_1",
"rx_pause_prio_2", "rx_pause_duration_prio_2",
"rx_pause_transition_prio_2",
"rx_pause_prio_3", "rx_pause_duration_prio_3",
"rx_pause_transition_prio_3",
"rx_pause_prio_4", "rx_pause_duration_prio_4",
"rx_pause_transition_prio_4",
"rx_pause_prio_5", "rx_pause_duration_prio_5",
"rx_pause_transition_prio_5",
"rx_pause_prio_6", "rx_pause_duration_prio_6",
"rx_pause_transition_prio_6",
"rx_pause_prio_7", "rx_pause_duration_prio_7",
"rx_pause_transition_prio_7",
/* flow control statistics rx */
"rx_pause", "rx_pause_duration", "rx_pause_transition",
/* priority flow control statistics tx */
"tx_pause_prio_0", "tx_pause_duration_prio_0",
"tx_pause_transition_prio_0",
"tx_pause_prio_1", "tx_pause_duration_prio_1",
"tx_pause_transition_prio_1",
"tx_pause_prio_2", "tx_pause_duration_prio_2",
"tx_pause_transition_prio_2",
"tx_pause_prio_3", "tx_pause_duration_prio_3",
"tx_pause_transition_prio_3",
"tx_pause_prio_4", "tx_pause_duration_prio_4",
"tx_pause_transition_prio_4",
"tx_pause_prio_5", "tx_pause_duration_prio_5",
"tx_pause_transition_prio_5",
"tx_pause_prio_6", "tx_pause_duration_prio_6",
"tx_pause_transition_prio_6",
"tx_pause_prio_7", "tx_pause_duration_prio_7",
"tx_pause_transition_prio_7",
/* flow control statistics tx */
"tx_pause", "tx_pause_duration", "tx_pause_transition",
/* packet statistics */
"rx_multicast_packets",
"rx_broadcast_packets",
"rx_jabbers",
"rx_in_range_length_error",
"rx_out_range_length_error",
"tx_multicast_packets",
"tx_broadcast_packets",
"rx_prio_0_packets", "rx_prio_0_bytes",
"rx_prio_1_packets", "rx_prio_1_bytes",
"rx_prio_2_packets", "rx_prio_2_bytes",
"rx_prio_3_packets", "rx_prio_3_bytes",
"rx_prio_4_packets", "rx_prio_4_bytes",
"rx_prio_5_packets", "rx_prio_5_bytes",
"rx_prio_6_packets", "rx_prio_6_bytes",
"rx_prio_7_packets", "rx_prio_7_bytes",
"rx_novlan_packets", "rx_novlan_bytes",
"tx_prio_0_packets", "tx_prio_0_bytes",
"tx_prio_1_packets", "tx_prio_1_bytes",
"tx_prio_2_packets", "tx_prio_2_bytes",
"tx_prio_3_packets", "tx_prio_3_bytes",
"tx_prio_4_packets", "tx_prio_4_bytes",
"tx_prio_5_packets", "tx_prio_5_bytes",
"tx_prio_6_packets", "tx_prio_6_bytes",
"tx_prio_7_packets", "tx_prio_7_bytes",
"tx_novlan_packets", "tx_novlan_bytes",
/* xdp statistics */
"rx_xdp_drop",
"rx_xdp_redirect",
"rx_xdp_redirect_fail",
"rx_xdp_tx",
"rx_xdp_tx_full",
/* phy statistics */
"rx_packets_phy", "rx_bytes_phy",
"tx_packets_phy", "tx_bytes_phy",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
"Interrupt Test",
"Link Test",
"Speed Test",
"Register Test",
"Loopback Test",
};
static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}
static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}
static void mlx4_en_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_caps *caps = &priv->mdev->dev->caps;
int err = 0;
u64 config = 0;
u64 mask;
if ((priv->port < 1) || (priv->port > 2)) {
en_err(priv, "Failed to get WoL information\n");
return;
}
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2;
if (!(caps->flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
}
if (caps->wol_port[priv->port])
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) {
en_err(priv, "Failed to get WoL information\n");
return;
}
if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
wol->wolopts = WAKE_MAGIC;
else
wol->wolopts = 0;
}
static int mlx4_en_set_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
u64 mask;
if ((priv->port < 1) || (priv->port > 2))
return -EOPNOTSUPP;
mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
MLX4_DEV_CAP_FLAG_WOL_PORT2;
if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
return -EINVAL;
err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
if (err) {
en_err(priv, "Failed to get WoL info, unable to modify\n");
return err;
}
if (wol->wolopts & WAKE_MAGIC) {
config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
MLX4_EN_WOL_MAGIC;
} else {
config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
config |= MLX4_EN_WOL_DO_MODIFY;
}
err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
if (err)
en_err(priv, "Failed to set WoL information\n");
return err;
}
struct bitmap_iterator {
unsigned long *stats_bitmap;
unsigned int count;
unsigned int iterator;
	bool advance_array; /* if set, consult stats_bitmap; else report all */
};
static inline void bitmap_iterator_init(struct bitmap_iterator *h,
unsigned long *stats_bitmap,
int count)
{
h->iterator = 0;
h->advance_array = !bitmap_empty(stats_bitmap, count);
h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
: count;
h->stats_bitmap = stats_bitmap;
}
static inline int bitmap_iterator_test(struct bitmap_iterator *h)
{
return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
}
static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
{
return h->iterator++;
}
static inline unsigned int
bitmap_iterator_count(struct bitmap_iterator *h)
{
return h->count;
}
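/* Illustrative note (not part of the upstream file): the iterator above has
 * two modes.  When the stats bitmap is empty, bitmap_iterator_test() always
 * returns 1 and bitmap_iterator_count() returns the full counter count, so
 * every statistic is reported.  When the bitmap is non-empty, only the
 * counters whose bits are set are reported and the count equals the bitmap
 * weight, which is how mlx4_en_get_sset_count(), mlx4_en_get_ethtool_stats()
 * and mlx4_en_get_strings() below stay in sync.
 */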
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct bitmap_iterator it;
bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
switch (sset) {
case ETH_SS_STATS:
return bitmap_iterator_count(&it) +
(priv->tx_ring_num[TX] * 2) +
(priv->rx_ring_num * (3 + NUM_XDP_STATS));
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx4_en_priv_flags);
default:
return -EOPNOTSUPP;
}
}
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, uint64_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
int i;
struct bitmap_iterator it;
bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->port_stats)[i];
for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] =
((unsigned long *)&priv->pf_stats)[i];
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] =
((u64 *)&priv->rx_priority_flowstats)[i];
for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((u64 *)&priv->rx_flowstats)[i];
for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] =
((u64 *)&priv->tx_priority_flowstats)[i];
for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((u64 *)&priv->tx_flowstats)[i];
for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->pkstats)[i];
for (i = 0; i < NUM_XDP_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->phy_stats)[i];
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
data[index++] = priv->tx_ring[TX][i]->packets;
data[index++] = priv->tx_ring[TX][i]->bytes;
}
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i]->packets;
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
data[index++] = priv->rx_ring[i]->xdp_drop;
data[index++] = priv->rx_ring[i]->xdp_redirect;
data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
data[index++] = priv->rx_ring[i]->xdp_tx;
data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
int i, strings = 0;
struct bitmap_iterator it;
bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
break;
case ETH_SS_STATS:
/* Add main counters */
for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_PORT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_PKT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_XDP_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < NUM_PHY_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_dropped", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_drop", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_redirect", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_redirect_fail", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
strcpy(data + i * ETH_GSTRING_LEN,
mlx4_en_priv_flags[i]);
break;
}
}
static u32 mlx4_en_autoneg_get(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
u32 autoneg = AUTONEG_DISABLE;
if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
(priv->port_state.flags & MLX4_EN_PORT_ANE))
autoneg = AUTONEG_ENABLE;
return autoneg;
}
static void ptys2ethtool_update_supported_port(unsigned long *mask,
struct mlx4_ptys_reg *ptys_reg)
{
u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
| MLX4_PROT_MASK(MLX4_1000BASE_T)
| MLX4_PROT_MASK(MLX4_100BASE_TX))) {
__set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
} else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
| MLX4_PROT_MASK(MLX4_10GBASE_SR)
| MLX4_PROT_MASK(MLX4_56GBASE_SR4)
| MLX4_PROT_MASK(MLX4_40GBASE_CR4)
| MLX4_PROT_MASK(MLX4_40GBASE_SR4)
| MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
__set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
} else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
| MLX4_PROT_MASK(MLX4_40GBASE_KR4)
| MLX4_PROT_MASK(MLX4_20GBASE_KR2)
| MLX4_PROT_MASK(MLX4_10GBASE_KR)
| MLX4_PROT_MASK(MLX4_10GBASE_KX4)
| MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
__set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask);
}
}
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);
if (!eth_proto) /* link down */
eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);
if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
| MLX4_PROT_MASK(MLX4_1000BASE_T)
| MLX4_PROT_MASK(MLX4_100BASE_TX))) {
return PORT_TP;
}
if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
| MLX4_PROT_MASK(MLX4_56GBASE_SR4)
| MLX4_PROT_MASK(MLX4_40GBASE_SR4)
| MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
return PORT_FIBRE;
}
if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
| MLX4_PROT_MASK(MLX4_56GBASE_CR4)
| MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
return PORT_DA;
}
if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
| MLX4_PROT_MASK(MLX4_40GBASE_KR4)
| MLX4_PROT_MASK(MLX4_20GBASE_KR2)
| MLX4_PROT_MASK(MLX4_10GBASE_KR)
| MLX4_PROT_MASK(MLX4_10GBASE_KX4)
| MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
return PORT_NONE;
}
return PORT_OTHER;
}
#define MLX4_LINK_MODES_SZ \
(sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8)
enum ethtool_report {
SUPPORTED = 0,
ADVERTISED = 1,
};
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
u32 speed;
};
static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
enum ethtool_report report)
{
switch (report) {
case SUPPORTED:
return cfg->supported;
case ADVERTISED:
return cfg->advertised;
}
return NULL;
}
#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
({ \
struct ptys2ethtool_config *cfg; \
static const unsigned int modes[] = { __VA_ARGS__ }; \
unsigned int i; \
cfg = &ptys2ethtool_map[reg_]; \
cfg->speed = speed_; \
linkmode_zero(cfg->supported); \
linkmode_zero(cfg->advertised); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
__set_bit(modes[i], cfg->supported); \
__set_bit(modes[i], cfg->advertised); \
} \
})
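/* Illustrative note (not part of the upstream file): a single invocation such
 * as
 *
 *	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
 *				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
 *
 * fills ptys2ethtool_map[MLX4_10GBASE_KR] with speed 10000 and sets that one
 * ethtool link-mode bit in both the supported and advertised masks; entries
 * listing several modes (e.g. MLX4_20GBASE_KR2) simply set every listed bit.
 */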
/* Translates mlx4 link mode to equivalent ethtool Link modes/speed */
static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ];
void __init mlx4_en_init_ptys2ethtool_map(void)
{
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100,
ETHTOOL_LINK_MODE_100baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000,
ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000,
ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000,
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT);
}
static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
u32 eth_proto,
enum ethtool_report report)
{
int i;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (eth_proto & MLX4_PROT_MASK(i))
linkmode_or(link_modes, link_modes,
ptys2ethtool_link_mode(&ptys2ethtool_map[i], report));
}
}
static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
enum ethtool_report report)
{
int i;
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i],
report);
if (linkmode_intersects(map_mode, link_modes))
ptys_modes |= 1 << i;
}
return ptys_modes;
}
/* Convert actual speed (SPEED_XXX) to ptys link modes */
static u32 speed2ptys_link_modes(u32 speed)
{
int i;
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (ptys2ethtool_map[i].speed == speed)
ptys_modes |= 1 << i;
}
return ptys_modes;
}
static int
ethtool_get_ptys_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
u32 eth_proto;
int ret;
memset(&ptys_reg, 0, sizeof(ptys_reg));
ptys_reg.local_port = priv->port;
ptys_reg.proto_mask = MLX4_PTYS_EN;
ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
MLX4_ACCESS_REG_QUERY, &ptys_reg);
if (ret) {
en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
ret);
return ret;
}
en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
ptys_reg.proto_mask);
en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
be32_to_cpu(ptys_reg.eth_proto_cap));
en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
be32_to_cpu(ptys_reg.eth_proto_admin));
en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
be32_to_cpu(ptys_reg.eth_proto_oper));
en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
be32_to_cpu(ptys_reg.eth_proto_lp_adv));
/* reset supported/advertising masks */
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported,
&ptys_reg);
eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported,
eth_proto, SUPPORTED);
eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising,
eth_proto, ADVERTISED);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Pause);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Asym_Pause);
if (priv->prof->tx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Pause);
if (priv->prof->tx_pause ^ priv->prof->rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Asym_Pause);
link_ksettings->base.port = ptys_get_active_port(&ptys_reg);
if (mlx4_en_autoneg_get(dev)) {
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, Autoneg);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
}
link_ksettings->base.autoneg
= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
ptys2ethtool_update_link_modes(
link_ksettings->link_modes.lp_advertising,
eth_proto, ADVERTISED);
if (priv->port_state.flags & MLX4_EN_PORT_ANC)
ethtool_link_ksettings_add_link_mode(link_ksettings,
lp_advertising, Autoneg);
link_ksettings->base.phy_address = 0;
link_ksettings->base.mdio_support = 0;
link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
return ret;
}
static void
ethtool_get_default_link_ksettings(
struct net_device *dev, struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int trans_type;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
10000baseT_Full);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising,
10000baseT_Full);
trans_type = priv->port_state.transceiver;
if (trans_type > 0 && trans_type <= 0xC) {
link_ksettings->base.port = PORT_FIBRE;
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, FIBRE);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, FIBRE);
} else if (trans_type == 0x80 || trans_type == 0) {
link_ksettings->base.port = PORT_TP;
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, TP);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, TP);
} else {
link_ksettings->base.port = -1;
}
}
static int
mlx4_en_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int ret = -EINVAL;
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
priv->port_state.flags & MLX4_EN_PORT_ANC,
priv->port_state.flags & MLX4_EN_PORT_ANE);
if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings);
	if (ret) /* ETH PROT CTRL is not supported or PTYS CMD failed */
ethtool_get_default_link_ksettings(dev, link_ksettings);
if (netif_carrier_ok(dev)) {
link_ksettings->base.speed = priv->port_state.link_speed;
link_ksettings->base.duplex = DUPLEX_FULL;
} else {
link_ksettings->base.speed = SPEED_UNKNOWN;
link_ksettings->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
/* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
__be32 proto_cap)
{
__be32 proto_admin = 0;
if (!speed) { /* Speed = 0 ==> Reset Link modes */
proto_admin = proto_cap;
en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
be32_to_cpu(proto_cap));
} else {
u32 ptys_link_modes = speed2ptys_link_modes(speed);
proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
en_info(priv, "Setting Speed to %d\n", speed);
}
return proto_admin;
}
static int
mlx4_en_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
__be32 proto_admin;
u8 cur_autoneg;
int ret;
u32 ptys_adv = ethtool2ptys_link_modes(
link_ksettings->link_modes.advertising, ADVERTISED);
const int speed = link_ksettings->base.speed;
en_dbg(DRV, priv,
"Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n",
speed, __ETHTOOL_LINK_MODE_MASK_NBITS,
link_ksettings->link_modes.advertising,
link_ksettings->base.autoneg,
link_ksettings->base.duplex);
if (!(priv->mdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
(link_ksettings->base.duplex == DUPLEX_HALF))
return -EINVAL;
memset(&ptys_reg, 0, sizeof(ptys_reg));
ptys_reg.local_port = priv->port;
ptys_reg.proto_mask = MLX4_PTYS_EN;
ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
MLX4_ACCESS_REG_QUERY, &ptys_reg);
if (ret) {
en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
ret);
return 0;
}
cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
proto_admin = speed_set_ptys_admin(priv, speed,
ptys_reg.eth_proto_cap);
if ((be32_to_cpu(proto_admin) &
(MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
(ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
} else {
proto_admin = cpu_to_be32(ptys_adv);
ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
}
proto_admin &= ptys_reg.eth_proto_cap;
if (!proto_admin) {
		en_warn(priv, "Unsupported link mode(s) requested; check supported link modes.\n");
return -EINVAL; /* nothing to change due to bad input */
}
if ((proto_admin == ptys_reg.eth_proto_admin) &&
((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
(link_ksettings->base.autoneg == cur_autoneg)))
return 0; /* Nothing to change */
en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
be32_to_cpu(proto_admin));
ptys_reg.eth_proto_admin = proto_admin;
ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
&ptys_reg);
if (ret) {
en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
be32_to_cpu(ptys_reg.eth_proto_admin), ret);
return ret;
}
mutex_lock(&priv->mdev->state_lock);
if (priv->port_up) {
en_warn(priv, "Port link mode changed, restarting port...\n");
mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&priv->mdev->state_lock);
return 0;
}
static int mlx4_en_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
coal->tx_coalesce_usecs = priv->tx_usecs;
coal->tx_max_coalesced_frames = priv->tx_frames;
coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
coal->rx_coalesce_usecs = priv->rx_usecs;
coal->rx_max_coalesced_frames = priv->rx_frames;
coal->pkt_rate_low = priv->pkt_rate_low;
coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
coal->pkt_rate_high = priv->pkt_rate_high;
coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
coal->rate_sample_interval = priv->sample_interval;
coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
return 0;
}
static int mlx4_en_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
__func__, MLX4_EN_MAX_COAL_TIME);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
__func__, MLX4_EN_MAX_COAL_PKTS);
return -ERANGE;
}
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
coal->rx_max_coalesced_frames;
priv->rx_usecs = (coal->rx_coalesce_usecs ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TIME :
coal->rx_coalesce_usecs;
/* Setting TX coalescing parameters */
if (coal->tx_coalesce_usecs != priv->tx_usecs ||
coal->tx_max_coalesced_frames != priv->tx_frames) {
priv->tx_usecs = coal->tx_coalesce_usecs;
priv->tx_frames = coal->tx_max_coalesced_frames;
}
/* Set adaptive coalescing params */
priv->pkt_rate_low = coal->pkt_rate_low;
priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
priv->pkt_rate_high = coal->pkt_rate_high;
priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
priv->sample_interval = coal->rate_sample_interval;
priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
return mlx4_en_moderation_update(priv);
}
static int mlx4_en_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
if (pause->autoneg)
return -EINVAL;
tx_pause = !!(pause->tx_pause);
rx_pause = !!(pause->rx_pause);
rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
tx_pause, tx_ppp, rx_pause, rx_ppp);
if (err) {
en_err(priv, "Failed setting pause params, err = %d\n", err);
return err;
}
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
rx_ppp, rx_pause, tx_ppp, tx_pause);
priv->prof->tx_pause = tx_pause;
priv->prof->rx_pause = rx_pause;
priv->prof->tx_ppp = tx_ppp;
priv->prof->rx_ppp = rx_ppp;
return err;
}
static void mlx4_en_get_pause_stats(struct net_device *dev,
struct ethtool_pause_stats *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct bitmap_iterator it;
bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
spin_lock_bh(&priv->stats_lock);
if (test_bit(FLOW_PRIORITY_STATS_IDX_TX_FRAMES,
priv->stats_bitmap.bitmap))
stats->tx_pause_frames = priv->tx_flowstats.tx_pause;
if (test_bit(FLOW_PRIORITY_STATS_IDX_RX_FRAMES,
priv->stats_bitmap.bitmap))
stats->rx_pause_frames = priv->rx_flowstats.rx_pause;
spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
pause->tx_pause = priv->prof->tx_pause;
pause->rx_pause = priv->prof->rx_pause;
}
static int mlx4_en_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
if (param->rx_jumbo_pending || param->rx_mini_pending)
return -EINVAL;
if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) {
en_warn(priv, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
MLX4_EN_MIN_RX_SIZE);
return -EINVAL;
}
if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) {
en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n",
__func__, param->tx_pending,
MLX4_EN_MIN_TX_SIZE);
return -EINVAL;
}
rx_size = roundup_pow_of_two(param->rx_pending);
tx_size = roundup_pow_of_two(param->tx_pending);
if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
priv->rx_ring[0]->size) &&
tx_size == priv->tx_ring[TX][0]->size)
return 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_size = tx_size;
new_prof.rx_ring_size = rx_size;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
err = mlx4_en_moderation_update(priv);
out:
kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
static void mlx4_en_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
memset(param, 0, sizeof(*param));
param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
param->rx_pending = priv->port_up ?
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
param->tx_pending = priv->tx_ring[TX][0]->size;
}
static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return rounddown_pow_of_two(priv->rx_ring_num);
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
return MLX4_EN_RSS_KEY_SIZE;
}
static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
/* check if requested function is supported by the device */
if (hfunc == ETH_RSS_HASH_TOP) {
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
return -EINVAL;
if (!(dev->features & NETIF_F_RXHASH))
en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
return 0;
} else if (hfunc == ETH_RSS_HASH_XOR) {
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
return -EINVAL;
if (dev->features & NETIF_F_RXHASH)
en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
return 0;
}
return -EINVAL;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
u32 i, rss_rings;
rss_rings = priv->prof->rss_rings ?: n;
rss_rings = rounddown_pow_of_two(rss_rings);
for (i = 0; i < n; i++) {
if (!ring_index)
break;
ring_index[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc)
*hfunc = priv->rss_hash_fn;
return 0;
}
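/* Apply a new RSS configuration. The indirection table must spread flows
* evenly across a power-of-two number of rings; the new table, key and
* hash function are applied with the port stopped (and restarted again
* if it was up).
*/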
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u32 n = mlx4_en_get_rxfh_indir_size(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
int i;
int rss_rings = 0;
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
for (i = 0; i < n; i++) {
if (!ring_index)
break;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
if (ring_index[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
if (!rss_rings)
rss_rings = n;
/* RSS table size must be a power of 2 */
if (!is_power_of_2(rss_rings))
return -EINVAL;
if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
err = mlx4_en_check_rxfh_func(dev, hfunc);
if (err)
return err;
}
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
if (ring_index)
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
if (hfunc != ETH_RSS_HASH_NO_CHANGE)
priv->rss_hash_fn = hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
mutex_unlock(&mdev->state_lock);
return err;
}
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
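/* Sanity-check an ethtool flow spec before it is translated into a
* steering rule: L3/L4 field masks must be empty or all-ones, a given
* destination MAC mask must be all-ones, and a VLAN match requires an
* exact VID.
*/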
static int mlx4_en_validate_flow(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
struct ethtool_usrip4_spec *l3_mask;
struct ethtool_tcpip4_spec *l4_mask;
struct ethhdr *eth_mask;
if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
if (cmd->fs.flow_type & FLOW_MAC_EXT) {
/* dest mac mask must be ff:ff:ff:ff:ff:ff */
if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
return -EINVAL;
}
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
if (cmd->fs.m_u.tcp_ip4_spec.tos)
return -EINVAL;
l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
/* only allow masks that are all zeros or all ones */
if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
!all_zeros_or_all_ones(l4_mask->ip4dst) ||
!all_zeros_or_all_ones(l4_mask->psrc) ||
!all_zeros_or_all_ones(l4_mask->pdst))
return -EINVAL;
break;
case IP_USER_FLOW:
l3_mask = &cmd->fs.m_u.usr_ip4_spec;
if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
(!l3_mask->ip4src && !l3_mask->ip4dst) ||
!all_zeros_or_all_ones(l3_mask->ip4src) ||
!all_zeros_or_all_ones(l3_mask->ip4dst))
return -EINVAL;
break;
case ETHER_FLOW:
eth_mask = &cmd->fs.m_u.ether_spec;
/* source mac mask must not be set */
if (!is_zero_ether_addr(eth_mask->h_source))
return -EINVAL;
/* dest mac mask must be ff:ff:ff:ff:ff:ff */
if (!is_broadcast_ether_addr(eth_mask->h_dest))
return -EINVAL;
if (!all_zeros_or_all_ones(eth_mask->h_proto))
return -EINVAL;
break;
default:
return -EINVAL;
}
if ((cmd->fs.flow_type & FLOW_EXT)) {
if (cmd->fs.m_ext.vlan_etype ||
!((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
0 ||
(cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
cpu_to_be16(VLAN_VID_MASK)))
return -EINVAL;
if (cmd->fs.m_ext.vlan_tci) {
if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
return -EINVAL;
}
}
return 0;
}
static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
struct list_head *rule_list_h,
struct mlx4_spec_list *spec_l2,
unsigned char *mac)
{
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
if ((cmd->fs.flow_type & FLOW_EXT) &&
(cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
}
list_add_tail(&spec_l2->list, rule_list_h);
return 0;
}
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *rule_list_h,
struct mlx4_spec_list *spec_l2,
__be32 ipv4_dst)
{
#ifdef CONFIG_INET
unsigned char mac[ETH_ALEN];
if (!ipv4_is_multicast(ipv4_dst)) {
if (cmd->fs.flow_type & FLOW_MAC_EXT)
memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
else
memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
} else {
ip_eth_mc_map(ipv4_dst, mac);
}
return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
return -EINVAL;
#endif
}
static int add_ip_rule(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *list_h)
{
int err;
struct mlx4_spec_list *spec_l2;
struct mlx4_spec_list *spec_l3;
struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
if (!spec_l2 || !spec_l3) {
err = -ENOMEM;
goto free_spec;
}
err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
cmd->fs.h_u.
usr_ip4_spec.ip4dst);
if (err)
goto free_spec;
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
if (l3_mask->ip4src)
spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
if (l3_mask->ip4dst)
spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
list_add_tail(&spec_l3->list, list_h);
return 0;
free_spec:
kfree(spec_l2);
kfree(spec_l3);
return err;
}
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *list_h, int proto)
{
int err;
struct mlx4_spec_list *spec_l2;
struct mlx4_spec_list *spec_l3;
struct mlx4_spec_list *spec_l4;
struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
if (!spec_l2 || !spec_l3 || !spec_l4) {
err = -ENOMEM;
goto free_spec;
}
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
if (proto == TCP_V4_FLOW) {
err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
spec_l2,
cmd->fs.h_u.
tcp_ip4_spec.ip4dst);
if (err)
goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
} else {
err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
spec_l2,
cmd->fs.h_u.
udp_ip4_spec.ip4dst);
if (err)
goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
}
if (l4_mask->ip4src)
spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
if (l4_mask->ip4dst)
spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
if (l4_mask->psrc)
spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
if (l4_mask->pdst)
spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
list_add_tail(&spec_l3->list, list_h);
list_add_tail(&spec_l4->list, list_h);
return 0;
free_spec:
kfree(spec_l2);
kfree(spec_l3);
kfree(spec_l4);
return err;
}
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
struct ethtool_rxnfc *cmd,
struct list_head *rule_list_h)
{
int err;
struct ethhdr *eth_spec;
struct mlx4_spec_list *spec_l2;
struct mlx4_en_priv *priv = netdev_priv(dev);
err = mlx4_en_validate_flow(dev, cmd);
if (err)
return err;
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
if (!spec_l2)
return -ENOMEM;
eth_spec = &cmd->fs.h_u.ether_spec;
mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
&eth_spec->h_dest[0]);
spec_l2->eth.ether_type = eth_spec->h_proto;
if (eth_spec->h_proto)
spec_l2->eth.ether_type_enable = 1;
break;
case IP_USER_FLOW:
err = add_ip_rule(priv, cmd, rule_list_h);
break;
case TCP_V4_FLOW:
err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
break;
case UDP_V4_FLOW:
err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
break;
}
return err;
}
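/* Insert or replace an ethtool steering rule: resolve the destination QP,
* translate the flow spec into an mlx4 net_trans rule, detach any rule
* already registered at this location and attach the new one.
*/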
static int mlx4_en_flow_replace(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
int err;
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ethtool_flow_id *loc_rule;
struct mlx4_spec_list *spec, *tmp_spec;
u32 qpn;
u64 reg_id;
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
};
rule.port = priv->port;
rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
INIT_LIST_HEAD(&rule.list);
/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
qpn = priv->drop_qp.qpn;
else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) {
en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
}
rule.qpn = qpn;
err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
if (err)
goto out_free_list;
loc_rule = &priv->ethtool_rules[cmd->fs.location];
if (loc_rule->id) {
err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
if (err) {
en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
cmd->fs.location, loc_rule->id);
goto out_free_list;
}
loc_rule->id = 0;
memset(&loc_rule->flow_spec, 0,
sizeof(struct ethtool_rx_flow_spec));
list_del(&loc_rule->list);
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location);
goto out_free_list;
}
loc_rule->id = reg_id;
memcpy(&loc_rule->flow_spec, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
list_add_tail(&loc_rule->list, &priv->ethtool_list);
out_free_list:
list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
list_del(&spec->list);
kfree(spec);
}
return err;
}
static int mlx4_en_flow_detach(struct net_device *dev,
struct ethtool_rxnfc *cmd)
{
int err = 0;
struct ethtool_flow_id *rule;
struct mlx4_en_priv *priv = netdev_priv(dev);
if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
rule = &priv->ethtool_rules[cmd->fs.location];
if (!rule->id) {
err = -ENOENT;
goto out;
}
err = mlx4_flow_detach(priv->mdev->dev, rule->id);
if (err) {
en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
cmd->fs.location, rule->id);
goto out;
}
rule->id = 0;
memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
list_del(&rule->list);
out:
return err;
}
static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
int loc)
{
int err = 0;
struct ethtool_flow_id *rule;
struct mlx4_en_priv *priv = netdev_priv(dev);
if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
return -EINVAL;
rule = &priv->ethtool_rules[loc];
if (rule->id)
memcpy(&cmd->fs, &rule->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
else
err = -ENOENT;
return err;
}
static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{
int i, res = 0;
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
if (priv->ethtool_rules[i].id)
res++;
}
return res;
}
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
int i = 0, priority = 0;
if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
cmd->cmd == ETHTOOL_GRXCLSRULE ||
cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
(mdev->dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = mlx4_en_get_num_flows(priv);
break;
case ETHTOOL_GRXCLSRULE:
err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
cmd->data = MAX_NUM_OF_FS_RULES;
while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
err = mlx4_en_get_flow(dev, cmd, i);
if (!err)
rule_locs[priority++] = i;
i++;
}
err = 0;
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int err = 0;
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
if (mdev->dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = mlx4_en_flow_replace(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
err = mlx4_en_flow_detach(dev, cmd);
break;
default:
en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
return -EINVAL;
}
return err;
}
static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
{
return min_t(int, num_online_cpus(), MAX_RX_RINGS);
}
static void mlx4_en_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
channel->rx_count = priv->rx_ring_num;
channel->tx_count = priv->tx_ring_num[TX] /
priv->prof->num_up;
}
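/* Change the number of RX/TX channels. The total TX ring count, including
* XDP rings, may not exceed MAX_TX_RINGS; resources are reallocated and
* the port is restarted if it was up.
*/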
static int mlx4_en_set_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int total_tx_count;
int port_up = 0;
int xdp_count;
int err = 0;
u8 up;
if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
if (total_tx_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
total_tx_count, MAX_TX_RINGS);
goto out;
}
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_tx_rings_p_up = channel->tx_count;
new_prof.tx_ring_num[TX] = channel->tx_count * priv->prof->num_up;
new_prof.tx_ring_num[TX_XDP] = xdp_count;
new_prof.rx_ring_num = channel->rx_count;
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
up = (priv->prof->num_up == MLX4_EN_NUM_UP_LOW) ?
0 : priv->prof->num_up;
mlx4_en_setup_tc(dev, up);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
err = mlx4_en_moderation_update(priv);
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
return err;
}
static int mlx4_en_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(dev, info);
if (ret)
return ret;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters =
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
if (mdev->ptp_clock)
info->phc_index = ptp_clock_index(mdev->ptp_clock);
}
return ret;
}
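/* Set driver private flags: the BlueFlame flag toggles BF doorbells on
* every TX ring (rejected unless all TX rings have a BF register
* allocated), and the PHV flag programs the port's PHV bit through
* set_phv_bit().
*/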
static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
int i;
int ret = 0;
if (bf_enabled_new != bf_enabled_old) {
int t;
if (bf_enabled_new) {
bool bf_supported = true;
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
for (i = 0; i < priv->tx_ring_num[t]; i++)
bf_supported &=
priv->tx_ring[t][i]->bf_alloced;
if (!bf_supported) {
en_err(priv, "BlueFlame is not supported\n");
return -EINVAL;
}
priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
} else {
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
}
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
for (i = 0; i < priv->tx_ring_num[t]; i++)
priv->tx_ring[t][i]->bf_enabled =
bf_enabled_new;
en_info(priv, "BlueFlame %s\n",
bf_enabled_new ? "Enabled" : "Disabled");
}
if (phv_enabled_new != phv_enabled_old) {
ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
if (ret)
return ret;
else if (phv_enabled_new)
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
else
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
en_info(priv, "PHV bit %s\n",
phv_enabled_new ? "Enabled" : "Disabled");
}
return 0;
}
static u32 mlx4_en_get_priv_flags(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return priv->pflags;
}
static int mlx4_en_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
{
const struct mlx4_en_priv *priv = netdev_priv(dev);
int ret = 0;
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
*(u32 *)data = priv->prof->inline_thold;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int mlx4_en_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int val, ret = 0;
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
val = *(u32 *)data;
if (val < MIN_PKT_LEN || val > MAX_INLINE)
ret = -EINVAL;
else
priv->prof->inline_thold = val;
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int ret;
u8 data[4];
/* Read first 2 bytes to get Module & REV ID */
ret = mlx4_get_module_info(mdev->dev, priv->port,
0/*offset*/, 2/*size*/, data);
if (ret < 2)
return -EIO;
switch (data[0] /* identifier */) {
case MLX4_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
break;
case MLX4_MODULE_ID_QSFP_PLUS:
if (data[1] >= 0x3) { /* revision id */
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
}
break;
case MLX4_MODULE_ID_QSFP28:
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
}
return 0;
}
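/* Read the module EEPROM in chunks: mlx4_get_module_info() may return
* fewer bytes than requested, so keep reading until the requested length
* is filled or an error is returned.
*/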
static int mlx4_en_get_module_eeprom(struct net_device *dev,
struct ethtool_eeprom *ee,
u8 *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int offset = ee->offset;
int i = 0, ret;
if (ee->len == 0)
return -EINVAL;
memset(data, 0, ee->len);
while (i < ee->len) {
en_dbg(DRV, priv,
"mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
i, offset, ee->len - i);
ret = mlx4_get_module_info(mdev->dev, priv->port,
offset, ee->len - i, data + i);
if (!ret) /* Done reading */
return 0;
if (ret < 0) {
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
return ret;
}
i += ret;
offset += ret;
}
return 0;
}
static int mlx4_en_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
int err;
u16 beacon_duration;
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
return -EOPNOTSUPP;
switch (state) {
case ETHTOOL_ID_ACTIVE:
beacon_duration = PORT_BEACON_MAX_LIMIT;
break;
case ETHTOOL_ID_INACTIVE:
beacon_duration = 0;
break;
default:
return -EOPNOTSUPP;
}
err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
return err;
}
const struct ethtool_ops mlx4_en_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_PKT_RATE_RX_USECS,
.get_drvinfo = mlx4_en_get_drvinfo,
.get_link_ksettings = mlx4_en_get_link_ksettings,
.set_link_ksettings = mlx4_en_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
.self_test = mlx4_en_self_test,
.set_phys_id = mlx4_en_set_phys_id,
.get_wol = mlx4_en_get_wol,
.set_wol = mlx4_en_set_wol,
.get_msglevel = mlx4_en_get_msglevel,
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
.set_coalesce = mlx4_en_set_coalesce,
.get_pause_stats = mlx4_en_get_pause_stats,
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
.get_rxfh = mlx4_en_get_rxfh,
.set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
.get_ts_info = mlx4_en_get_ts_info,
.set_priv_flags = mlx4_en_set_priv_flags,
.get_priv_flags = mlx4_en_get_priv_flags,
.get_tunable = mlx4_en_get_tunable,
.set_tunable = mlx4_en_set_tunable,
.get_module_info = mlx4_en_get_module_info,
.get_module_eeprom = mlx4_en_get_module_eeprom
};
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/if_vlan.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include "en_port.h"
#include "mlx4_en.h"
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_vlan_fltr_mbox *filter;
int i;
int j;
int index = 0;
u32 entry;
int err = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
filter = mailbox->buf;
for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
entry = 0;
for (j = 0; j < 32; j++)
if (test_bit(index++, priv->active_vlans))
entry |= 1 << j;
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
{
struct mlx4_en_query_port_context *qport_context;
struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
struct mlx4_en_port_state *state = &priv->port_state;
struct mlx4_cmd_mailbox *mailbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
/* This command is always accessed from ethtool context,
* which is already synchronized, so no locking is needed */
state->link_state = !!(qport_context->link_up & MLX4_EN_LINK_UP_MASK);
switch (qport_context->link_speed & MLX4_EN_SPEED_MASK) {
case MLX4_EN_100M_SPEED:
state->link_speed = SPEED_100;
break;
case MLX4_EN_1G_SPEED:
state->link_speed = SPEED_1000;
break;
case MLX4_EN_10G_SPEED_XAUI:
case MLX4_EN_10G_SPEED_XFI:
state->link_speed = SPEED_10000;
break;
case MLX4_EN_20G_SPEED:
state->link_speed = SPEED_20000;
break;
case MLX4_EN_40G_SPEED:
state->link_speed = SPEED_40000;
break;
case MLX4_EN_56G_SPEED:
state->link_speed = SPEED_56000;
break;
default:
state->link_speed = -1;
break;
}
state->transceiver = qport_context->transceiver;
state->flags = 0; /* Reset and recalculate the port flags */
state->flags |= (qport_context->link_up & MLX4_EN_ANC_MASK) ?
MLX4_EN_PORT_ANC : 0;
state->flags |= (qport_context->autoneg & MLX4_EN_AUTONEG_MASK) ?
MLX4_EN_PORT_ANE : 0;
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
}
/* Each counter set is located in struct mlx4_en_stat_out_mbox
* with a const offset between its prio components.
* This function runs over a counter set and sums all of its prio components.
*/
static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
{
__be64 *curr = start;
unsigned long ret = 0;
int i;
int offset = next - start;
for (i = 0; i < num; i++) {
ret += be64_to_cpu(*curr);
curr += offset;
}
return ret;
}
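/* Fold the per-ring software packet/byte counters into the netdev stats.
* Skipped while the port is down and on the mfunc master, which uses the
* physical-port counters instead.
*/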
void mlx4_en_fold_software_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
unsigned long packets, bytes;
int i;
if (!priv->port_up || mlx4_is_master(mdev->dev))
return;
packets = 0;
bytes = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
}
dev->stats.rx_packets = packets;
dev->stats.rx_bytes = bytes;
packets = 0;
bytes = 0;
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
packets += READ_ONCE(ring->packets);
bytes += READ_ONCE(ring->bytes);
}
dev->stats.tx_packets = packets;
dev->stats.tx_bytes = bytes;
}
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
{
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
struct net_device *dev = mdev->pndev[port];
struct mlx4_en_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox, *mailbox_priority;
u64 in_mod = reset << 8 | port;
int err;
int i, counter_index;
unsigned long sw_tx_dropped = 0;
unsigned long sw_rx_dropped = 0;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox_priority)) {
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return PTR_ERR(mailbox_priority);
}
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
if (err)
goto out;
mlx4_en_stats = mailbox->buf;
memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats));
counter_index = mlx4_get_default_counter_index(mdev->dev, port);
err = mlx4_get_counter_stats(mdev->dev, counter_index,
&tmp_counter_stats, reset);
/* all-0xff indicates an invalid value */
memset(mailbox_priority->buf, 0xff,
sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) {
memset(mailbox_priority->buf, 0,
sizeof(*flowstats) * MLX4_NUM_PRIORITIES);
err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
flowstats = mailbox_priority->buf;
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
priv->port_stats.rx_chksum_good = 0;
priv->port_stats.rx_chksum_none = 0;
priv->port_stats.rx_chksum_complete = 0;
priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
priv->xdp_stats.rx_xdp_redirect = 0;
priv->xdp_stats.rx_xdp_redirect_fail = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
const struct mlx4_en_rx_ring *ring = priv->rx_ring[i];
sw_rx_dropped += READ_ONCE(ring->dropped);
priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);
priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
priv->port_stats.tso_packets = 0;
priv->port_stats.xmit_more = 0;
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
const struct mlx4_en_tx_ring *ring = priv->tx_ring[TX][i];
sw_tx_dropped += READ_ONCE(ring->tx_dropped);
priv->port_stats.tx_chksum_offload += READ_ONCE(ring->tx_csum);
priv->port_stats.queue_stopped += READ_ONCE(ring->queue_stopped);
priv->port_stats.wake_queue += READ_ONCE(ring->wake_queue);
priv->port_stats.tso_packets += READ_ONCE(ring->tso_packets);
priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
if (!mlx4_is_slave(mdev->dev)) {
struct mlx4_en_phy_stats *p_stats = &priv->phy_stats;
p_stats->rx_packets_phy =
en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
&mlx4_en_stats->RTOT_prio_1,
NUM_PRIORITIES);
p_stats->tx_packets_phy =
en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
&mlx4_en_stats->TTOT_prio_1,
NUM_PRIORITIES);
p_stats->rx_bytes_phy =
en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
&mlx4_en_stats->ROCT_prio_1,
NUM_PRIORITIES);
p_stats->tx_bytes_phy =
en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
&mlx4_en_stats->TOCT_prio_1,
NUM_PRIORITIES);
if (mlx4_is_master(mdev->dev)) {
stats->rx_packets = p_stats->rx_packets_phy;
stats->tx_packets = p_stats->tx_packets_phy;
stats->rx_bytes = p_stats->rx_bytes_phy;
stats->tx_bytes = p_stats->tx_bytes_phy;
}
}
/* net device stats */
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RJBBR) +
be32_to_cpu(mlx4_en_stats->RCRC) +
be32_to_cpu(mlx4_en_stats->RRUNT) +
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr) +
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr) +
be32_to_cpu(mlx4_en_stats->RSHORT) +
en_stats_adder(&mlx4_en_stats->RGIANT_prio_0,
&mlx4_en_stats->RGIANT_prio_1,
NUM_PRIORITIES);
stats->tx_errors = en_stats_adder(&mlx4_en_stats->TGIANT_prio_0,
&mlx4_en_stats->TGIANT_prio_1,
NUM_PRIORITIES);
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP) +
sw_tx_dropped;
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
priv->pkstats.rx_broadcast_packets =
en_stats_adder(&mlx4_en_stats->RBCAST_prio_0,
&mlx4_en_stats->RBCAST_prio_1,
NUM_PRIORITIES);
priv->pkstats.rx_jabbers = be32_to_cpu(mlx4_en_stats->RJBBR);
priv->pkstats.rx_in_range_length_error =
be64_to_cpu(mlx4_en_stats->RInRangeLengthErr);
priv->pkstats.rx_out_range_length_error =
be64_to_cpu(mlx4_en_stats->ROutRangeLengthErr);
/* Tx stats */
priv->pkstats.tx_multicast_packets =
en_stats_adder(&mlx4_en_stats->TMCAST_prio_0,
&mlx4_en_stats->TMCAST_prio_1,
NUM_PRIORITIES);
priv->pkstats.tx_broadcast_packets =
en_stats_adder(&mlx4_en_stats->TBCAST_prio_0,
&mlx4_en_stats->TBCAST_prio_1,
NUM_PRIORITIES);
priv->pkstats.rx_prio[0][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
priv->pkstats.rx_prio[0][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_0);
priv->pkstats.rx_prio[1][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
priv->pkstats.rx_prio[1][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_1);
priv->pkstats.rx_prio[2][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
priv->pkstats.rx_prio[2][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_2);
priv->pkstats.rx_prio[3][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
priv->pkstats.rx_prio[3][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_3);
priv->pkstats.rx_prio[4][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
priv->pkstats.rx_prio[4][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_4);
priv->pkstats.rx_prio[5][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
priv->pkstats.rx_prio[5][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_5);
priv->pkstats.rx_prio[6][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
priv->pkstats.rx_prio[6][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_6);
priv->pkstats.rx_prio[7][0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
priv->pkstats.rx_prio[7][1] = be64_to_cpu(mlx4_en_stats->ROCT_prio_7);
priv->pkstats.rx_prio[8][0] = be64_to_cpu(mlx4_en_stats->RTOT_novlan);
priv->pkstats.rx_prio[8][1] = be64_to_cpu(mlx4_en_stats->ROCT_novlan);
priv->pkstats.tx_prio[0][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
priv->pkstats.tx_prio[0][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_0);
priv->pkstats.tx_prio[1][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
priv->pkstats.tx_prio[1][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_1);
priv->pkstats.tx_prio[2][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
priv->pkstats.tx_prio[2][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_2);
priv->pkstats.tx_prio[3][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
priv->pkstats.tx_prio[3][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_3);
priv->pkstats.tx_prio[4][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
priv->pkstats.tx_prio[4][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_4);
priv->pkstats.tx_prio[5][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
priv->pkstats.tx_prio[5][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_5);
priv->pkstats.tx_prio[6][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
priv->pkstats.tx_prio[6][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_6);
priv->pkstats.tx_prio[7][0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
priv->pkstats.tx_prio[7][1] = be64_to_cpu(mlx4_en_stats->TOCT_prio_7);
priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan);
priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan);
if (tmp_counter_stats.counter_mode == 0) {
priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes);
priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes);
priv->pf_stats.rx_packets = be64_to_cpu(tmp_counter_stats.rx_frames);
priv->pf_stats.tx_packets = be64_to_cpu(tmp_counter_stats.tx_frames);
}
for (i = 0; i < MLX4_NUM_PRIORITIES; i++) {
priv->rx_priority_flowstats[i].rx_pause =
be64_to_cpu(flowstats[i].rx_pause);
priv->rx_priority_flowstats[i].rx_pause_duration =
be64_to_cpu(flowstats[i].rx_pause_duration);
priv->rx_priority_flowstats[i].rx_pause_transition =
be64_to_cpu(flowstats[i].rx_pause_transition);
priv->tx_priority_flowstats[i].tx_pause =
be64_to_cpu(flowstats[i].tx_pause);
priv->tx_priority_flowstats[i].tx_pause_duration =
be64_to_cpu(flowstats[i].tx_pause_duration);
priv->tx_priority_flowstats[i].tx_pause_transition =
be64_to_cpu(flowstats[i].tx_pause_transition);
}
/* if PFC is not in use, all priority counters have the same value */
priv->rx_flowstats.rx_pause =
be64_to_cpu(flowstats[0].rx_pause);
priv->rx_flowstats.rx_pause_duration =
be64_to_cpu(flowstats[0].rx_pause_duration);
priv->rx_flowstats.rx_pause_transition =
be64_to_cpu(flowstats[0].rx_pause_transition);
priv->tx_flowstats.tx_pause =
be64_to_cpu(flowstats[0].tx_pause);
priv->tx_flowstats.tx_pause_duration =
be64_to_cpu(flowstats[0].tx_pause_duration);
priv->tx_flowstats.tx_pause_transition =
be64_to_cpu(flowstats[0].tx_pause_transition);
spin_unlock_bh(&priv->stats_lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority);
return err;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_port.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <net/devlink.h>
#include "mlx4.h"
static DEFINE_MUTEX(intf_mutex);
static DEFINE_IDA(mlx4_adev_ida);
static bool is_eth_supported(struct mlx4_dev *dev)
{
for (int port = 1; port <= dev->caps.num_ports; port++)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return true;
return false;
}
static bool is_ib_supported(struct mlx4_dev *dev)
{
for (int port = 1; port <= dev->caps.num_ports; port++)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
return true;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
return true;
return false;
}
static const struct mlx4_adev_device {
const char *suffix;
bool (*is_supported)(struct mlx4_dev *dev);
} mlx4_adev_devices[] = {
{ "eth", is_eth_supported },
{ "ib", is_ib_supported },
};
int mlx4_adev_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->adev_idx = ida_alloc(&mlx4_adev_ida, GFP_KERNEL);
if (priv->adev_idx < 0)
return priv->adev_idx;
priv->adev = kcalloc(ARRAY_SIZE(mlx4_adev_devices),
sizeof(struct mlx4_adev *), GFP_KERNEL);
if (!priv->adev) {
ida_free(&mlx4_adev_ida, priv->adev_idx);
return -ENOMEM;
}
return 0;
}
void mlx4_adev_cleanup(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
kfree(priv->adev);
ida_free(&mlx4_adev_ida, priv->adev_idx);
}
static void adev_release(struct device *dev)
{
struct mlx4_adev *mlx4_adev =
container_of(dev, struct mlx4_adev, adev.dev);
struct mlx4_priv *priv = mlx4_priv(mlx4_adev->mdev);
int idx = mlx4_adev->idx;
kfree(mlx4_adev);
priv->adev[idx] = NULL;
}
static struct mlx4_adev *add_adev(struct mlx4_dev *dev, int idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
const char *suffix = mlx4_adev_devices[idx].suffix;
struct auxiliary_device *adev;
struct mlx4_adev *madev;
int ret;
madev = kzalloc(sizeof(*madev), GFP_KERNEL);
if (!madev)
return ERR_PTR(-ENOMEM);
adev = &madev->adev;
adev->id = priv->adev_idx;
adev->name = suffix;
adev->dev.parent = &dev->persist->pdev->dev;
adev->dev.release = adev_release;
madev->mdev = dev;
madev->idx = idx;
ret = auxiliary_device_init(adev);
if (ret) {
kfree(madev);
return ERR_PTR(ret);
}
ret = auxiliary_device_add(adev);
if (ret) {
auxiliary_device_uninit(adev);
return ERR_PTR(ret);
}
return madev;
}
static void del_adev(struct auxiliary_device *adev)
{
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv)
{
return auxiliary_driver_register(&madrv->adrv);
}
EXPORT_SYMBOL_GPL(mlx4_register_auxiliary_driver);
void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv)
{
auxiliary_driver_unregister(&madrv->adrv);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_auxiliary_driver);
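/* Toggle port bonding (HA) mode. Auxiliary devices whose drivers opted in
* with MLX4_INTFF_BONDING are torn down and re-created so they pick up the
* new port mapping; multi-function (SRIOV) devices are skipped.
*/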
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, ret;
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
return -EOPNOTSUPP;
ret = mlx4_disable_rx_port_check(dev, enable);
if (ret) {
mlx4_err(dev, "Fail to %s rx port check\n",
enable ? "enable" : "disable");
return ret;
}
if (enable) {
dev->flags |= MLX4_FLAG_BONDED;
} else {
ret = mlx4_virt2phy_port_map(dev, 1, 2);
if (ret) {
mlx4_err(dev, "Fail to reset port map\n");
return ret;
}
dev->flags &= ~MLX4_FLAG_BONDED;
}
mutex_lock(&intf_mutex);
for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
struct mlx4_adev *madev = priv->adev[i];
struct mlx4_adrv *madrv;
enum mlx4_protocol protocol;
if (!madev)
continue;
device_lock(&madev->adev.dev);
if (!madev->adev.dev.driver) {
device_unlock(&madev->adev.dev);
continue;
}
madrv = container_of(madev->adev.dev.driver, struct mlx4_adrv,
adrv.driver);
if (!(madrv->flags & MLX4_INTFF_BONDING)) {
device_unlock(&madev->adev.dev);
continue;
}
if (mlx4_is_mfunc(dev)) {
mlx4_dbg(dev,
"SRIOV, disabled HA mode for intf proto %d\n",
madrv->protocol);
device_unlock(&madev->adev.dev);
continue;
}
protocol = madrv->protocol;
device_unlock(&madev->adev.dev);
del_adev(&madev->adev);
priv->adev[i] = add_adev(dev, i);
if (IS_ERR(priv->adev[i])) {
mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
mlx4_adev_devices[i].suffix);
priv->adev[i] = NULL;
continue;
}
mlx4_dbg(dev,
"Interface for protocol %d restarted with bonded mode %s\n",
protocol, enable ? "enabled" : "disabled");
}
mutex_unlock(&intf_mutex);
return 0;
}
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
void *param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
atomic_notifier_call_chain(&priv->event_nh, type, param);
}
int mlx4_register_event_notifier(struct mlx4_dev *dev,
struct notifier_block *nb)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return atomic_notifier_chain_register(&priv->event_nh, nb);
}
EXPORT_SYMBOL(mlx4_register_event_notifier);
int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
struct notifier_block *nb)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return atomic_notifier_chain_unregister(&priv->event_nh, nb);
}
EXPORT_SYMBOL(mlx4_unregister_event_notifier);
static int add_drivers(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(mlx4_adev_devices); i++) {
bool is_supported = false;
if (priv->adev[i])
continue;
if (mlx4_adev_devices[i].is_supported)
is_supported = mlx4_adev_devices[i].is_supported(dev);
if (!is_supported)
continue;
priv->adev[i] = add_adev(dev, i);
if (IS_ERR(priv->adev[i])) {
mlx4_warn(dev, "Device[%d] (%s) failed to load\n", i,
mlx4_adev_devices[i].suffix);
/* We continue to rescan drivers and leave it to the caller
* to decide whether to release everything or
* continue. */
ret = PTR_ERR(priv->adev[i]);
priv->adev[i] = NULL;
}
}
return ret;
}
static void delete_drivers(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
bool delete_all;
int i;
delete_all = !(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP);
for (i = ARRAY_SIZE(mlx4_adev_devices) - 1; i >= 0; i--) {
bool is_supported = false;
if (!priv->adev[i])
continue;
if (mlx4_adev_devices[i].is_supported && !delete_all)
is_supported = mlx4_adev_devices[i].is_supported(dev);
if (is_supported)
continue;
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
}
/* This function is used after mlx4_dev is reconfigured.
*/
static int rescan_drivers_locked(struct mlx4_dev *dev)
{
lockdep_assert_held(&intf_mutex);
delete_drivers(dev);
if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
return 0;
return add_drivers(dev);
}
int mlx4_register_device(struct mlx4_dev *dev)
{
int ret;
mutex_lock(&intf_mutex);
dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
ret = rescan_drivers_locked(dev);
mutex_unlock(&intf_mutex);
if (ret) {
mlx4_unregister_device(dev);
return ret;
}
mlx4_start_catas_poll(dev);
return ret;
}
void mlx4_unregister_device(struct mlx4_dev *dev)
{
if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
return;
mlx4_stop_catas_poll(dev);
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
mlx4_is_slave(dev)) {
/* In mlx4_remove_one on a VF */
u32 slave_read =
swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
if (mlx4_comm_internal_err(slave_read)) {
mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
__func__);
mlx4_enter_error_state(dev->persist);
}
}
mutex_lock(&intf_mutex);
dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;
rescan_drivers_locked(dev);
mutex_unlock(&intf_mutex);
}
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
return &info->devlink_port;
}
EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/intf.c
|
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include "mlx4.h"
#include "icm.h"
/* QPs that support BlueFlame (BF) must have bits 6 and 7 of the QPN cleared */
#define MLX4_BF_QP_SKIP_MASK 0xc0
#define MLX4_MAX_BF_QP_RANGE 0x40
void mlx4_put_qp(struct mlx4_qp *qp)
{
if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free);
}
EXPORT_SYMBOL_GPL(mlx4_put_qp);
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;
spin_lock(&qp_table->lock);
qp = __mlx4_qp_lookup(dev, qpn);
if (qp)
refcount_inc(&qp->refcount);
spin_unlock(&qp_table->lock);
if (!qp) {
mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
return;
}
/* Need to call mlx4_put_qp() in event handler */
qp->event(qp, event_type);
}
/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
/* this procedure is called after we already know we are on the master */
/* qp0 is either the proxy qp0, or the real qp0 */
u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
qp->qpn <= dev->phys_caps.base_sqpn + 1;
return *real_qp0 || *proxy_qp0;
}
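/* Core QP state-transition helper. The op[][] table below maps each legal
* (current, new) state pair to the firmware command that performs the
* transition; zero entries mark transitions the hardware does not allow.
*/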
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
struct mlx4_qp_context *context,
enum mlx4_qp_optpar optpar,
int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
},
[MLX4_QP_STATE_INIT] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
[MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
},
[MLX4_QP_STATE_RTR] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
},
[MLX4_QP_STATE_RTS] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
[MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
},
[MLX4_QP_STATE_SQD] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
[MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
},
[MLX4_QP_STATE_SQER] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
[MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
},
[MLX4_QP_STATE_ERR] = {
[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
}
};
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
int real_qp0 = 0;
int proxy_qp0 = 0;
u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
ret = mlx4_cmd(dev, 0, qp->qpn, 2,
MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
cur_state != MLX4_QP_STATE_RST &&
is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
port = (qp->qpn & 1) + 1;
if (proxy_qp0)
priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
else
priv->mfunc.master.qp0_state[port].qp0_active = 0;
}
return ret;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
context->mtt_base_addr_h = mtt_addr >> 32;
context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
if ((cur_state == MLX4_QP_STATE_RTR) &&
(new_state == MLX4_QP_STATE_RTS) &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
context->roce_entropy =
cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof(*context));
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
ret = mlx4_cmd(dev, mailbox->dma,
qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
port = (qp->qpn & 1) + 1;
if (cur_state != MLX4_QP_STATE_ERR &&
cur_state != MLX4_QP_STATE_RST &&
new_state == MLX4_QP_STATE_ERR) {
if (proxy_qp0)
priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
else
priv->mfunc.master.qp0_state[port].qp0_active = 0;
} else if (new_state == MLX4_QP_STATE_RTR) {
if (proxy_qp0)
priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
else
priv->mfunc.master.qp0_state[port].qp0_active = 1;
}
}
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
struct mlx4_qp_context *context,
enum mlx4_qp_optpar optpar,
int sqd_event, struct mlx4_qp *qp)
{
return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags)
{
u32 uid;
int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
return -ENOMEM;
uid = MLX4_QP_TABLE_ZONE_GENERAL;
if (flags & (u8)MLX4_RESERVE_A0_QP) {
if (bf_qp)
uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
else
uid = MLX4_QP_TABLE_ZONE_RSS;
}
*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
if (*base == -1)
return -ENOMEM;
return 0;
}
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags, u8 usage)
{
u32 in_modifier = RES_QP | (((u32)usage & 3) << 30);
u64 in_param = 0;
u64 out_param;
int err;
/* Turn off all unsupported QP allocation flags */
flags &= dev->caps.alloc_res_qp_mask;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
set_param_h(&in_param, align);
err = mlx4_cmd_imm(dev, in_param, &out_param,
in_modifier, RES_OP_RESERVE,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
return err;
*base = get_param_l(&out_param);
return 0;
}
return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
return;
mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
u64 in_param = 0;
int err;
if (!cnt)
return;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, base_qpn);
set_param_h(&in_param, cnt);
err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) {
mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
base_qpn, cnt);
}
} else
__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
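/* Illustrative only (a minimal sketch, not the original driver's flow): a
 * consumer that wants one data QP would pair the helpers above roughly as
 * follows; the MLX4_RES_USAGE_DRIVER hint is an assumption for the sketch.
 *
 *	int base, err;
 *
 *	err = mlx4_qp_reserve_range(dev, 1, 1, &base, 0, MLX4_RES_USAGE_DRIVER);
 *	if (err)
 *		return err;
 *	err = mlx4_qp_alloc(dev, base, &qp);
 *	if (err)
 *		mlx4_qp_release_range(dev, base, 1);
 *
 * Teardown mirrors it: mlx4_qp_remove(), mlx4_qp_free(), then
 * mlx4_qp_release_range() on the reserved range.
 */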
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
return 0;
err_put_rdmarc:
mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
u64 param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&param, qpn);
return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
return __mlx4_qp_alloc_icm(dev, qpn);
}
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
mlx4_table_put(dev, &qp_table->altc_table, qpn);
mlx4_table_put(dev, &qp_table->auxc_table, qpn);
mlx4_table_put(dev, &qp_table->qp_table, qpn);
}
static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, qpn);
if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED))
mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
} else
__mlx4_qp_free_icm(dev, qpn);
}
struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;
spin_lock_irq(&qp_table->lock);
qp = __mlx4_qp_lookup(dev, qpn);
spin_unlock_irq(&qp_table->lock);
return qp;
}
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
if (!qpn)
return -EINVAL;
qp->qpn = qpn;
err = mlx4_qp_alloc_icm(dev, qpn);
if (err)
return err;
spin_lock_irq(&qp_table->lock);
err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
(dev->caps.num_qps - 1), qp);
spin_unlock_irq(&qp_table->lock);
if (err)
goto err_icm;
refcount_set(&qp->refcount, 1);
init_completion(&qp->free);
return 0;
err_icm:
mlx4_qp_free_icm(dev, qpn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *cmd;
u64 pri_addr_path_mask = 0;
u64 qp_mask = 0;
int err = 0;
if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
return -EINVAL;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cmd = (struct mlx4_update_qp_context *)mailbox->buf;
if (attr & MLX4_UPDATE_QP_SMAC) {
pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
}
if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
if (!(dev->caps.flags2
& MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
mlx4_warn(dev,
"Trying to set src check LB, but it isn't supported\n");
err = -EOPNOTSUPP;
goto out;
}
pri_addr_path_mask |=
1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
if (params->flags &
MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
cmd->qp_context.pri_path.fl |=
MLX4_FL_ETH_SRC_CHECK_MC_LB;
}
}
if (attr & MLX4_UPDATE_QP_VSD) {
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
}
if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
}
if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
err = -EOPNOTSUPP;
goto out;
}
qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
cmd->qp_context.qos_vport = params->qos_vport;
}
cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
cmd->qp_mask = cpu_to_be64(qp_mask);
err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
unsigned long flags;
spin_lock_irqsave(&qp_table->lock, flags);
radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
mlx4_put_qp(qp);
wait_for_completion(&qp->free);
mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE 256
static int mlx4_create_zones(struct mlx4_dev *dev,
u32 reserved_bottom_general,
u32 reserved_top_general,
u32 reserved_bottom_rss,
u32 start_offset_rss,
u32 max_table_offset)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
int bitmap_initialized = 0;
u32 last_offset;
int k;
int err;
qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
if (NULL == qp_table->zones)
return -ENOMEM;
bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
if (NULL == bitmap) {
err = -ENOMEM;
goto free_zone;
}
err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
(1 << 23) - 1, reserved_bottom_general,
reserved_top_general);
if (err)
goto free_bitmap;
++bitmap_initialized;
err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
MLX4_ZONE_USE_RR, 0,
0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);
if (err)
goto free_bitmap;
err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
reserved_bottom_rss,
reserved_bottom_rss - 1,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
reserved_bottom_rss - start_offset_rss);
if (err)
goto free_bitmap;
++bitmap_initialized;
err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);
if (err)
goto free_bitmap;
last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
/* We have a single zone for the A0 steering QPs area of the FW. This area
* needs to be split into subareas. One set of subareas is for RSS QPs
* (in which qp number bits 6 and/or 7 are set); the other set of subareas
* is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
* Currently, the values returned by the FW (A0 steering area starting qp number
* and A0 steering area size) are such that there are only two subareas -- one
* for RSS and one for RAW_ETH.
*/
for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
k++) {
int size;
u32 offset = start_offset_rss;
u32 bf_mask;
u32 requested_size;
/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
* a mask of all LSB bits set until (and not including) the first
* set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
* is 0xc0, bf_mask will be 0x3f.
*/
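/* Worked example of that identity (illustrative; 0xc0 is just the value
 * used in the comment above, not necessarily the real skip mask):
 *
 *	mask               = 0xc0  (0b11000000)
 *	mask - 1           = 0xbf  (0b10111111)
 *	~(mask - 1)        = ...0x40 in the low byte
 *	mask & ~(mask - 1) = 0x40  (lowest set bit isolated)
 *	result - 1         = 0x3f  (all bits below the lowest set bit)
 */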
bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);
if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
((int)(max_table_offset - last_offset)) >=
roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
(!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
!((last_offset + requested_size - 1) &
MLX4_BF_QP_SKIP_MASK)))
size = requested_size;
else {
u32 candidate_offset =
(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;
if (last_offset & MLX4_BF_QP_SKIP_MASK)
last_offset = candidate_offset;
/* From this point, the BF bits are 0 */
if (last_offset > max_table_offset) {
/* need to skip */
size = -1;
} else {
size = min3(max_table_offset - last_offset,
bf_mask - (last_offset & bf_mask),
requested_size);
if (size < requested_size) {
int candidate_size;
candidate_size = min3(
max_table_offset - candidate_offset,
bf_mask - (last_offset & bf_mask),
requested_size);
/* We will not take this path if last_offset was
* already set above to candidate_offset
*/
if (candidate_size > size) {
last_offset = candidate_offset;
size = candidate_size;
}
}
}
}
if (size > 0) {
/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
* QPs in which both bits 6 and 7 are zero, because we pass it
* MLX4_BF_QP_SKIP_MASK as the skip mask.
*/
offset = mlx4_bitmap_alloc_range(
*bitmap + MLX4_QP_TABLE_ZONE_RSS,
size, 1,
MLX4_BF_QP_SKIP_MASK);
if (offset == (u32)-1) {
err = -ENOMEM;
break;
}
last_offset = offset + size;
err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
roundup_pow_of_two(size) - 1, 0,
roundup_pow_of_two(size) - size);
} else {
/* Add an empty bitmap, we'll allocate from different zones (since
* at least one is reserved)
*/
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
if (!err)
mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
break;
++bitmap_initialized;
err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
offset, qp_table->zones_uids + k);
if (err)
break;
}
if (err)
goto free_bitmap;
qp_table->bitmap_gen = *bitmap;
return err;
free_bitmap:
for (k = 0; k < bitmap_initialized; k++)
mlx4_bitmap_cleanup(*bitmap + k);
kfree(bitmap);
free_zone:
mlx4_zone_allocator_destroy(qp_table->zones);
return err;
}
static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
if (qp_table->zones) {
int i;
for (i = 0;
i < ARRAY_SIZE(qp_table->zones_uids);
i++) {
struct mlx4_bitmap *bitmap =
mlx4_zone_get_bitmap(qp_table->zones,
qp_table->zones_uids[i]);
mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
if (NULL == bitmap)
continue;
mlx4_bitmap_cleanup(bitmap);
}
mlx4_zone_allocator_destroy(qp_table->zones);
kfree(qp_table->bitmap_gen);
qp_table->bitmap_gen = NULL;
qp_table->zones = NULL;
}
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
int reserved_from_top = 0;
int reserved_from_bot;
int k;
int fixed_reserved_from_bot_rv = 0;
int bottom_reserved_for_rss_bitmap;
u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
dev->caps.dmfs_high_rate_qpn_range;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
/* We reserve 2 extra QPs per port for the special QPs. The
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*
* We also reserve the MSB of the 24-bit QP number to indicate
* that a QP is an XRC QP.
*/
for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];
if (fixed_reserved_from_bot_rv < max_table_offset)
fixed_reserved_from_bot_rv = max_table_offset;
/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
bottom_reserved_for_rss_bitmap =
roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
{
int sort[MLX4_NUM_QP_REGION];
int i, j;
int last_base = dev->caps.num_qps;
for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
sort[i] = i;
for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
if (dev->caps.reserved_qps_cnt[sort[j]] >
dev->caps.reserved_qps_cnt[sort[j - 1]])
swap(sort[j], sort[j - 1]);
}
}
for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
last_base -= dev->caps.reserved_qps_cnt[sort[i]];
dev->caps.reserved_qps_base[sort[i]] = last_base;
reserved_from_top +=
dev->caps.reserved_qps_cnt[sort[i]];
}
}
/* Reserve 8 real SQPs in both native and SRIOV modes.
* In addition, in SRIOV mode, reserve 8 proxy SQPs per function
* (for all PFs and VFs), and 8 corresponding tunnel QPs.
* Each proxy SQP works opposite its own tunnel QP.
*
* The QPs are arranged as follows:
* a. 8 real SQPs
* b. All the proxy SQPs (8 per function)
* c. All the tunnel QPs (8 per function)
*/
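/* Illustrative layout sketch (symbolic values, not real firmware output):
 * with base_sqpn = B and F = MLX4_MFUNC_MAX functions, the code below
 * places the blocks at
 *
 *	real SQPs:   B            .. B + 7
 *	proxy SQPs:  B + 8        .. B + 8 + 8*F - 1
 *	tunnel QPs:  B + 8 + 8*F  .. B + 8 + 16*F - 1
 *
 * which matches base_proxy_sqpn = B + 8 and
 * base_tunnel_sqpn = B + 8 + 8 * MLX4_MFUNC_MAX computed further down.
 */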
reserved_from_bot = mlx4_num_reserved_sqps(dev);
if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
return -EINVAL;
}
err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
bottom_reserved_for_rss_bitmap,
fixed_reserved_from_bot_rv,
max_table_offset);
if (err)
return err;
if (mlx4_is_mfunc(dev)) {
/* for PPF use */
dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
* since the PF does not call mlx4_slave_caps */
dev->caps.spec_qps = kcalloc(dev->caps.num_ports,
sizeof(*dev->caps.spec_qps),
GFP_KERNEL);
if (!dev->caps.spec_qps) {
err = -ENOMEM;
goto err_mem;
}
for (k = 0; k < dev->caps.num_ports; k++) {
dev->caps.spec_qps[k].qp0_proxy = dev->phys_caps.base_proxy_sqpn +
8 * mlx4_master_func_num(dev) + k;
dev->caps.spec_qps[k].qp0_tunnel = dev->caps.spec_qps[k].qp0_proxy + 8 * MLX4_MFUNC_MAX;
dev->caps.spec_qps[k].qp1_proxy = dev->phys_caps.base_proxy_sqpn +
8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
dev->caps.spec_qps[k].qp1_tunnel = dev->caps.spec_qps[k].qp1_proxy + 8 * MLX4_MFUNC_MAX;
}
}
err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
if (err)
goto err_mem;
return err;
err_mem:
kfree(dev->caps.spec_qps);
dev->caps.spec_qps = NULL;
mlx4_cleanup_qp_zones(dev);
return err;
}
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
if (mlx4_is_slave(dev))
return;
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_cleanup_qp_zones(dev);
}
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
struct mlx4_qp_context *context)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof(*context));
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_qp_context *context,
struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
int err;
int i;
static const enum mlx4_qp_state states[] = {
MLX4_QP_STATE_RST,
MLX4_QP_STATE_INIT,
MLX4_QP_STATE_RTR,
MLX4_QP_STATE_RTS
};
for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
if (states[i + 1] != MLX4_QP_STATE_RTR)
context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
states[i + 1], err);
return err;
}
*qp_state = states[i + 1];
}
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
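/* Illustrative only: a consumer typically fills a zeroed context and walks
 * the QP all the way to RTS in one call; the context-filling helper named
 * here is hypothetical, used only to keep the sketch short.
 *
 *	enum mlx4_qp_state qp_state;
 *
 *	memset(&context, 0, sizeof(context));
 *	fill_qp_context(&context);		// hypothetical helper
 *	err = mlx4_qp_to_ready(dev, &mtt, &context, &qp, &qp_state);
 *
 * On success *qp_state ends up as MLX4_QP_STATE_RTS; on failure the
 * mlx4_err() above has already logged which target state was rejected.
 */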
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
struct mlx4_qp_context context;
struct mlx4_qp qp;
int err;
qp.qpn = qpn;
err = mlx4_qp_query(dev, &qp, &context);
if (!err) {
u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
u16 folded_dst = folded_qp(dest_qpn);
u16 folded_src = folded_qp(qpn);
return (dest_qpn != qpn) ?
((folded_dst ^ folded_src) | 0xC000) :
folded_src | 0xC000;
}
return 0xdead;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/qp.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>
#include "mlx4_en.h"
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
return;
}
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode,
int node)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
int err;
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
if (!cq) {
en_err(priv, "Failed to allocate CQ structure\n");
return -ENOMEM;
}
cq->size = entries;
cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq->ring = ring;
cq->type = mode;
cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
*/
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
err_cq:
kfree(cq);
*pcq = NULL;
return err;
}
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
int irq, err = 0;
int timestamp_en = 0;
bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
cq->mcq.arm_db = cq->wqres.db.db + 1;
*cq->mcq.set_ci_db = 0;
*cq->mcq.arm_db = 0;
memset(cq->buf, 0, cq->buf_size);
if (cq->type == RX) {
if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
cq->vector)) {
cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
err = mlx4_assign_eq(mdev->dev, priv->port,
&cq->vector);
if (err) {
mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
cq->vector);
goto free_eq;
}
assigned_eq = true;
}
irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
cq->aff_mask = irq_get_effective_affinity_mask(irq);
} else {
/* For TX we use the same irq per
* ring we assigned for the RX
*/
struct mlx4_en_cq *rx_cq;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
}
if (cq->type == RX)
cq->size = priv->rx_ring[cq->ring]->actual_size;
if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
(cq->type == RX && priv->hwtstamp_config.rx_filter))
timestamp_en = 1;
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
if (err)
goto free_eq;
cq->mcq.event = mlx4_en_cq_event;
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
napi_enable(&cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
napi_enable(&cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
cq->xdp_busy = false;
break;
}
return 0;
free_eq:
if (assigned_eq)
mlx4_release_eq(mdev->dev, cq->vector);
cq->vector = mdev->dev->caps.num_comp_vectors;
return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
cq->type == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
kfree(cq);
*pcq = NULL;
}
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
if (cq->type != TX_XDP) {
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
/* Set rx cq moderation parameters */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
cq->moder_cnt, cq->moder_time);
}
void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
&priv->mdev->uar_lock);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_cq.c
|
/*
* Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/workqueue.h>
#include <linux/module.h>
#include "mlx4.h"
enum {
MLX4_CATAS_POLL_INTERVAL = 5 * HZ,
};
int mlx4_internal_err_reset = 1;
module_param_named(internal_err_reset, mlx4_internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
"Reset device on internal errors if non-zero (default 1)");
static int read_vendor_id(struct mlx4_dev *dev)
{
u16 vendor_id = 0;
int ret;
ret = pci_read_config_word(dev->persist->pdev, 0, &vendor_id);
if (ret) {
mlx4_err(dev, "Failed to read vendor ID, ret=%d\n", ret);
return ret;
}
if (vendor_id == 0xffff) {
mlx4_err(dev, "PCI can't be accessed to read vendor id\n");
return -EINVAL;
}
return 0;
}
static int mlx4_reset_master(struct mlx4_dev *dev)
{
int err = 0;
if (mlx4_is_master(dev))
mlx4_report_internal_err_comm_event(dev);
if (!pci_channel_offline(dev->persist->pdev)) {
err = read_vendor_id(dev);
/* If PCI can't be accessed to read vendor ID we assume that its
* link was disabled and chip was already reset.
*/
if (err)
return 0;
err = mlx4_reset(dev);
if (err)
mlx4_err(dev, "Fail to reset HCA\n");
}
return err;
}
static int mlx4_reset_slave(struct mlx4_dev *dev)
{
#define COM_CHAN_RST_REQ_OFFSET 0x10
#define COM_CHAN_RST_ACK_OFFSET 0x08
u32 comm_flags;
u32 rst_req;
u32 rst_ack;
unsigned long end;
struct mlx4_priv *priv = mlx4_priv(dev);
if (pci_channel_offline(dev->persist->pdev))
return 0;
comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
MLX4_COMM_CHAN_FLAGS));
if (comm_flags == 0xffffffff) {
mlx4_err(dev, "VF reset is not needed\n");
return 0;
}
if (!(dev->caps.vf_caps & MLX4_VF_CAP_FLAG_RESET)) {
mlx4_err(dev, "VF reset is not supported\n");
return -EOPNOTSUPP;
}
rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
COM_CHAN_RST_REQ_OFFSET;
rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
COM_CHAN_RST_ACK_OFFSET;
if (rst_req != rst_ack) {
mlx4_err(dev, "Communication channel isn't sync, fail to send reset\n");
return -EIO;
}
rst_req ^= 1;
mlx4_warn(dev, "VF is sending reset request to Firmware\n");
comm_flags = rst_req << COM_CHAN_RST_REQ_OFFSET;
__raw_writel((__force u32)cpu_to_be32(comm_flags),
(__iomem char *)priv->mfunc.comm + MLX4_COMM_CHAN_FLAGS);
end = msecs_to_jiffies(MLX4_COMM_TIME) + jiffies;
while (time_before(jiffies, end)) {
comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
MLX4_COMM_CHAN_FLAGS));
rst_ack = (comm_flags & (u32)(1 << COM_CHAN_RST_ACK_OFFSET)) >>
COM_CHAN_RST_ACK_OFFSET;
/* Reading rst_req again since the communication channel can
* be reset at any time by the PF and all its bits will be
* set to zero.
*/
rst_req = (comm_flags & (u32)(1 << COM_CHAN_RST_REQ_OFFSET)) >>
COM_CHAN_RST_REQ_OFFSET;
if (rst_ack == rst_req) {
mlx4_warn(dev, "VF Reset succeed\n");
return 0;
}
cond_resched();
}
mlx4_err(dev, "Fail to send reset over the communication channel\n");
return -ETIMEDOUT;
}
int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
}
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
{
int err;
struct mlx4_dev *dev;
if (!mlx4_internal_err_reset)
return;
mutex_lock(&persist->device_state_mutex);
if (persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
goto out;
dev = persist->dev;
mlx4_err(dev, "device is going to be reset\n");
if (mlx4_is_slave(dev)) {
err = mlx4_reset_slave(dev);
} else {
mlx4_crdump_collect(dev);
err = mlx4_reset_master(dev);
}
if (!err) {
mlx4_err(dev, "device was reset successfully\n");
} else {
/* EEH could have disabled the PCI channel during reset. That's
* recoverable and the PCI error flow will handle it.
*/
if (!pci_channel_offline(dev->persist->pdev))
BUG_ON(1);
}
dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&persist->device_state_mutex);
/* At that step HW was already reset, now notify clients */
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, NULL);
mlx4_cmd_wake_completions(dev);
return;
out:
mutex_unlock(&persist->device_state_mutex);
}
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
struct mlx4_dev *dev = persist->dev;
struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
devlink = priv_to_devlink(mlx4_priv(dev));
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
err = mlx4_restart_one(persist->pdev);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
mlx4_err(dev, "Internal error detected:\n");
for (i = 0; i < priv->fw.catas_size; ++i)
mlx4_err(dev, " buf[%02x]: %08x\n",
i, swab32(readl(priv->catas_err.map + i)));
}
static void poll_catas(struct timer_list *t)
{
struct mlx4_priv *priv = from_timer(priv, t, catas_err.timer);
struct mlx4_dev *dev = &priv->dev;
u32 slave_read;
if (mlx4_is_slave(dev)) {
slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
if (mlx4_comm_internal_err(slave_read)) {
mlx4_warn(dev, "Internal error detected on the communication channel\n");
goto internal_err;
}
} else if (readl(priv->catas_err.map)) {
dump_err_buf(dev);
goto internal_err;
}
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mlx4_warn(dev, "Internal error mark was detected on device\n");
goto internal_err;
}
mod_timer(&priv->catas_err.timer,
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
return;
internal_err:
if (mlx4_internal_err_reset)
queue_work(dev->persist->catas_wq, &dev->persist->catas_work);
}
static void catas_reset(struct work_struct *work)
{
struct mlx4_dev_persistent *persist =
container_of(work, struct mlx4_dev_persistent,
catas_work);
mlx4_handle_error_state(persist);
}
void mlx4_start_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
INIT_LIST_HEAD(&priv->catas_err.list);
timer_setup(&priv->catas_err.timer, poll_catas, 0);
priv->catas_err.map = NULL;
if (!mlx4_is_slave(dev)) {
addr = pci_resource_start(dev->persist->pdev,
priv->fw.catas_bar) +
priv->fw.catas_offset;
priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
if (!priv->catas_err.map) {
mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
(unsigned long long)addr);
return;
}
}
priv->catas_err.timer.expires =
round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
add_timer(&priv->catas_err.timer);
}
void mlx4_stop_catas_poll(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
del_timer_sync(&priv->catas_err.timer);
if (priv->catas_err.map) {
iounmap(priv->catas_err.map);
priv->catas_err.map = NULL;
}
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION)
flush_workqueue(dev->persist->catas_wq);
}
int mlx4_catas_init(struct mlx4_dev *dev)
{
INIT_WORK(&dev->persist->catas_work, catas_reset);
dev->persist->catas_wq = create_singlethread_workqueue("mlx4_health");
if (!dev->persist->catas_wq)
return -ENOMEM;
return 0;
}
void mlx4_catas_end(struct mlx4_dev *dev)
{
if (dev->persist->catas_wq) {
destroy_workqueue(dev->persist->catas_wq);
dev->persist->catas_wq = NULL;
}
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/catas.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
* All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT 2
#define MLX4_VF_COUNTERS_PER_PORT 1
struct mac_res {
struct list_head list;
u64 mac;
int ref_count;
u8 smac_index;
u8 port;
};
struct vlan_res {
struct list_head list;
u16 vlan;
int ref_count;
int vlan_index;
u8 port;
};
struct res_common {
struct list_head list;
struct rb_node node;
u64 res_id;
int owner;
int state;
int from_state;
int to_state;
int removing;
const char *func_name;
};
enum {
RES_ANY_BUSY = 1
};
struct res_gid {
struct list_head list;
u8 gid[16];
enum mlx4_protocol prot;
enum mlx4_steer_type steer;
u64 reg_id;
};
enum res_qp_states {
RES_QP_BUSY = RES_ANY_BUSY,
/* QP number was allocated */
RES_QP_RESERVED,
/* ICM memory for QP context was mapped */
RES_QP_MAPPED,
/* QP is in hw ownership */
RES_QP_HW
};
struct res_qp {
struct res_common com;
struct res_mtt *mtt;
struct res_cq *rcq;
struct res_cq *scq;
struct res_srq *srq;
struct list_head mcg_list;
spinlock_t mcg_spl;
int local_qpn;
atomic_t ref_count;
u32 qpc_flags;
/* saved qp params before VST enforcement in order to restore on VGT */
u8 sched_queue;
__be32 param3;
u8 vlan_control;
u8 fvl_rx;
u8 pri_path_fl;
u8 vlan_index;
u8 feup;
};
enum res_mtt_states {
RES_MTT_BUSY = RES_ANY_BUSY,
RES_MTT_ALLOCATED,
};
static inline const char *mtt_states_str(enum res_mtt_states state)
{
switch (state) {
case RES_MTT_BUSY: return "RES_MTT_BUSY";
case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
default: return "Unknown";
}
}
struct res_mtt {
struct res_common com;
int order;
atomic_t ref_count;
};
enum res_mpt_states {
RES_MPT_BUSY = RES_ANY_BUSY,
RES_MPT_RESERVED,
RES_MPT_MAPPED,
RES_MPT_HW,
};
struct res_mpt {
struct res_common com;
struct res_mtt *mtt;
int key;
};
enum res_eq_states {
RES_EQ_BUSY = RES_ANY_BUSY,
RES_EQ_RESERVED,
RES_EQ_HW,
};
struct res_eq {
struct res_common com;
struct res_mtt *mtt;
};
enum res_cq_states {
RES_CQ_BUSY = RES_ANY_BUSY,
RES_CQ_ALLOCATED,
RES_CQ_HW,
};
struct res_cq {
struct res_common com;
struct res_mtt *mtt;
atomic_t ref_count;
};
enum res_srq_states {
RES_SRQ_BUSY = RES_ANY_BUSY,
RES_SRQ_ALLOCATED,
RES_SRQ_HW,
};
struct res_srq {
struct res_common com;
struct res_mtt *mtt;
struct res_cq *cq;
atomic_t ref_count;
};
enum res_counter_states {
RES_COUNTER_BUSY = RES_ANY_BUSY,
RES_COUNTER_ALLOCATED,
};
struct res_counter {
struct res_common com;
int port;
};
enum res_xrcdn_states {
RES_XRCD_BUSY = RES_ANY_BUSY,
RES_XRCD_ALLOCATED,
};
struct res_xrcdn {
struct res_common com;
int port;
};
enum res_fs_rule_states {
RES_FS_RULE_BUSY = RES_ANY_BUSY,
RES_FS_RULE_ALLOCATED,
};
struct res_fs_rule {
struct res_common com;
int qpn;
/* VF DMFS mbox with port flipped */
void *mirr_mbox;
/* > 0 --> apply mirror when getting into HA mode */
/* = 0 --> un-apply mirror when getting out of HA mode */
u32 mirr_mbox_size;
struct list_head mirr_list;
u64 mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;
while (node) {
struct res_common *res = rb_entry(node, struct res_common,
node);
if (res_id < res->res_id)
node = node->rb_left;
else if (res_id > res->res_id)
node = node->rb_right;
else
return res;
}
return NULL;
}
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
/* Figure out where to put new node */
while (*new) {
struct res_common *this = rb_entry(*new, struct res_common,
node);
parent = *new;
if (res->res_id < this->res_id)
new = &((*new)->rb_left);
else if (res->res_id > this->res_id)
new = &((*new)->rb_right);
else
return -EEXIST;
}
/* Add new node and rebalance tree. */
rb_link_node(&res->node, parent, new);
rb_insert_color(&res->node, root);
return 0;
}
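/* Illustrative only: the trackers below key resources by a 64-bit res_id
 * (for QPs, the local 24-bit QP number), so a typical insert/lookup pair
 * against one of the per-type trees looks like
 *
 *	err = res_tracker_insert(&tracker->res_tree[RES_QP], &res->com);
 *	...
 *	res = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
 *
 * where -EEXIST from the insert means the id is already being tracked.
 */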
enum qp_transition {
QP_TRANS_INIT2RTR,
QP_TRANS_RTR2RTS,
QP_TRANS_RTS2RTS,
QP_TRANS_SQERR2RTS,
QP_TRANS_SQD2SQD,
QP_TRANS_SQD2RTS
};
/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
case RES_CQ: return "RES_CQ";
case RES_SRQ: return "RES_SRQ";
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EDQUOT;
int allocated, free, reserved, guaranteed, from_free;
int from_rsvd;
if (slave > dev->persist->num_vfs)
return -EINVAL;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
free = (port > 0) ? res_alloc->res_port_free[port - 1] :
res_alloc->res_free;
reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
if (allocated + count > res_alloc->quota[slave]) {
mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
slave, port, resource_str(res_type), count,
allocated, res_alloc->quota[slave]);
goto out;
}
if (allocated + count <= guaranteed) {
err = 0;
from_rsvd = count;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
from_free = count - (guaranteed - allocated);
else
from_free = count;
from_rsvd = count - from_free;
if (free - from_free >= reserved)
err = 0;
else
mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
slave, port, resource_str(res_type), free,
from_free, reserved);
}
if (!err) {
/* grant the request */
if (port > 0) {
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
res_alloc->res_reserved -= from_rsvd;
}
}
out:
spin_unlock(&res_alloc->alloc_lock);
return err;
}
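/* Worked example of the accounting above (illustrative numbers only):
 * guaranteed = 4, allocated = 3, count = 3.
 *
 *	allocated + count (6) exceeds guaranteed (4), so part of the grant
 *	comes from the shared pool: from_free = 3 - (4 - 3) = 2 and
 *	from_rsvd = 3 - 2 = 1.
 *
 * The request is granted only while free - from_free >= reserved, i.e.
 * while the shared pool still covers every other function's guarantee.
 */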
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int allocated, guaranteed, from_rsvd;
if (slave > dev->persist->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
guaranteed = res_alloc->guaranteed[slave];
if (allocated - count >= guaranteed) {
from_rsvd = 0;
} else {
/* portion may need to be returned to reserved area */
if (allocated - guaranteed > 0)
from_rsvd = count - (allocated - guaranteed);
else
from_rsvd = count;
}
if (port > 0) {
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
res_alloc->res_reserved += from_rsvd;
}
spin_unlock(&res_alloc->alloc_lock);
return;
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
enum mlx4_resource res_type,
int vf, int num_instances)
{
res_alloc->guaranteed[vf] = num_instances /
(2 * (dev->persist->num_vfs + 1));
res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
if (vf == mlx4_master_func_num(dev)) {
res_alloc->res_free = num_instances;
if (res_type == RES_MTT) {
/* reserved mtts will be taken out of the PF allocation */
res_alloc->res_free += dev->caps.reserved_mtts;
res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
res_alloc->quota[vf] += dev->caps.reserved_mtts;
}
}
}
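/* Worked example of the split above (illustrative numbers, not real
 * capability values): num_instances = 65536 and num_vfs = 7 gives
 *
 *	guaranteed[vf] = 65536 / (2 * 8)    = 4096
 *	quota[vf]      = 65536 / 2 + 4096   = 36864
 *
 * so each function is guaranteed an equal slice of half the pool and may
 * additionally borrow up to half of the total from the shared free area.
 */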
void mlx4_init_quotas(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int pf;
/* quotas for VFs are initialized in mlx4_slave_cap */
if (mlx4_is_slave(dev))
return;
if (!mlx4_is_mfunc(dev)) {
dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev);
dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
return;
}
pf = mlx4_master_func_num(dev);
dev->quotas.qp =
priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
dev->quotas.cq =
priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
dev->quotas.srq =
priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
dev->quotas.mtt =
priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
int vf)
{
struct mlx4_active_ports actv_ports;
int ports, counters_guaranteed;
/* For master, only allocate according to the number of phys ports */
if (vf == mlx4_master_func_num(dev))
return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
/* calculate real number of ports for the VF */
actv_ports = mlx4_get_active_ports(dev, vf);
ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
/* If we do not have enough counters for this VF, do not
* allocate any for it. '-1' to reduce the sink counter.
*/
if ((res_alloc->res_reserved + counters_guaranteed) >
(dev->caps.max_counters - 1))
return 0;
return counters_guaranteed;
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, j;
int t;
priv->mfunc.master.res_tracker.slave_list =
kcalloc(dev->num_slaves, sizeof(struct slave_list),
GFP_KERNEL);
if (!priv->mfunc.master.res_tracker.slave_list)
return -ENOMEM;
for (i = 0 ; i < dev->num_slaves; i++) {
for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
slave_list[i].res_list[t]);
mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
}
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
sizeof(int),
GFP_KERNEL);
res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
sizeof(int),
GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated =
kcalloc(MLX4_MAX_PORTS *
(dev->persist->num_vfs + 1),
sizeof(int), GFP_KERNEL);
else
res_alloc->allocated =
kcalloc(dev->persist->num_vfs + 1,
sizeof(int), GFP_KERNEL);
/* Reduce the sink counter */
if (i == RES_COUNTER)
res_alloc->res_free = dev->caps.max_counters - 1;
if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)
goto no_mem_err;
spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->persist->num_vfs + 1; t++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, t);
switch (i) {
case RES_QP:
initialize_res_quotas(dev, res_alloc, RES_QP,
t, dev->caps.num_qps -
dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev));
break;
case RES_CQ:
initialize_res_quotas(dev, res_alloc, RES_CQ,
t, dev->caps.num_cqs -
dev->caps.reserved_cqs);
break;
case RES_SRQ:
initialize_res_quotas(dev, res_alloc, RES_SRQ,
t, dev->caps.num_srqs -
dev->caps.reserved_srqs);
break;
case RES_MPT:
initialize_res_quotas(dev, res_alloc, RES_MPT,
t, dev->caps.num_mpts -
dev->caps.reserved_mrws);
break;
case RES_MTT:
initialize_res_quotas(dev, res_alloc, RES_MTT,
t, dev->caps.num_mtts -
dev->caps.reserved_mtts);
break;
case RES_MAC:
if (t == mlx4_master_func_num(dev)) {
int max_vfs_pport = 0;
/* Calculate the max vfs per port for both ports */
for (j = 0; j < dev->caps.num_ports;
j++) {
struct mlx4_slaves_pport slaves_pport =
mlx4_phys_to_slaves_pport(dev, j + 1);
unsigned current_slaves =
bitmap_weight(slaves_pport.slaves,
dev->caps.num_ports) - 1;
if (max_vfs_pport < current_slaves)
max_vfs_pport =
current_slaves;
}
res_alloc->quota[t] =
MLX4_MAX_MAC_NUM -
2 * max_vfs_pport;
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =
MLX4_MAX_MAC_NUM;
} else {
res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;
}
break;
case RES_VLAN:
if (t == mlx4_master_func_num(dev)) {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =
res_alloc->quota[t];
} else {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
res_alloc->guaranteed[t] = 0;
}
break;
case RES_COUNTER:
res_alloc->quota[t] = dev->caps.max_counters;
res_alloc->guaranteed[t] =
mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
break;
default:
break;
}
if (i == RES_MAC || i == RES_VLAN) {
for (j = 0; j < dev->caps.num_ports; j++)
if (test_bit(j, actv_ports.ports))
res_alloc->res_port_rsvd[j] +=
res_alloc->guaranteed[t];
} else {
res_alloc->res_reserved += res_alloc->guaranteed[t];
}
}
}
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0;
no_mem_err:
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
enum mlx4_res_tracker_free_type type)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
if (priv->mfunc.master.res_tracker.slave_list) {
if (type != RES_TR_FREE_STRUCTS_ONLY) {
for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);
}
/* free master's vlans */
i = dev->caps.function;
mlx4_reset_roce_gids(dev, i);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
rem_slave_vlans(dev, i);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
}
if (type != RES_TR_FREE_SLAVES_ONLY) {
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
}
kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
}
}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox)
{
u8 sched = *(u8 *)(inbox->buf + 64);
u8 orig_index = *(u8 *)(inbox->buf + 35);
u8 new_index;
struct mlx4_priv *priv = mlx4_priv(dev);
int port;
port = (sched >> 6 & 1) + 1;
new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
*(u8 *)(inbox->buf + 35) = new_index;
}
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
u8 slave)
{
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
int port;
if (MLX4_QP_ST_UD == ts) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port))
qp_ctx->pri_path.mgid_index =
mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
else
qp_ctx->pri_path.mgid_index = slave | 0x80;
} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->pri_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->pri_path.mgid_index &= 0x7f;
} else {
qp_ctx->pri_path.mgid_index = slave & 0x7F;
}
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->alt_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->alt_path.mgid_index &= 0x7f;
} else {
qp_ctx->alt_path.mgid_index = slave & 0x7F;
}
}
}
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port);
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
u8 slave, u32 qpn)
{
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
u32 qp_type;
int port, err = 0;
port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
err = handle_counter(dev, qpc, slave, port);
if (err)
goto out;
if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
* do not operate over vlans
*/
if (mlx4_is_qp_reserved(dev, qpn))
return 0;
/* force vlan stripping by clearing vsd; MLX QP refers to Raw Ethernet */
if (qp_type == MLX4_QP_ST_UD ||
(qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
*(__be32 *)inbox->buf =
cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
MLX4_QP_OPTPAR_VLAN_STRIPPING);
qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
} else {
struct mlx4_update_qp_params params = {.flags = 0};
err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
if (err)
goto out;
}
}
/* preserve IF_COUNTER flag */
qpc->pri_path.vlan_control &=
MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
/* vst QinQ should block untagged on TX,
* but cvlan is in payload and phv is set so
* hw sees it as untagged. Block tagged instead.
*/
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
} else { /* vst 802.1Q */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
}
} else { /* priority tagged */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
}
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
qpc->pri_path.fl |= MLX4_FL_SV;
else
qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
qpc->qos_vport = vp_oper->state.qos_vport;
}
if (vp_oper->state.spoofchk) {
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
}
out:
return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
return dev->caps.num_mpts - 1;
}
static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
switch (t) {
case RES_QP:
return "QP";
case RES_CQ:
return "CQ";
case RES_SRQ:
return "SRQ";
case RES_XRCD:
return "XRCD";
case RES_MPT:
return "MPT";
case RES_MTT:
return "MTT";
case RES_MAC:
return "MAC";
case RES_VLAN:
return "VLAN";
case RES_COUNTER:
return "COUNTER";
case RES_FS_RULE:
return "FS_RULE";
case RES_EQ:
return "EQ";
default:
return "INVALID RESOURCE";
}
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
res_id);
}
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res, const char *func_name)
{
struct res_common *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
if (!r) {
err = -ENONET;
goto exit;
}
if (r->state == RES_ANY_BUSY) {
mlx4_warn(dev,
"%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
func_name, slave, res_id, mlx4_resource_type_to_str(type),
r->func_name);
err = -EBUSY;
goto exit;
}
if (r->owner != slave) {
err = -EPERM;
goto exit;
}
r->from_state = r->state;
r->state = RES_ANY_BUSY;
r->func_name = func_name;
if (res)
*((struct res_common **)res) = r;
exit:
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
#define get_res(dev, slave, res_id, type, res) \
_get_res((dev), (slave), (res_id), (type), (res), __func__)
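/* Illustrative only: the macro records __func__ so a second caller hitting
 * a busy resource gets a useful diagnostic. The usual pattern in the
 * command wrappers below is
 *
 *	err = get_res(dev, slave, qpn, RES_QP, &qp);
 *	if (err)
 *		return err;
 *	...work on the resource while it is marked RES_ANY_BUSY...
 *	put_res(dev, slave, qpn, RES_QP);
 *
 * get_res() returns -ENONET if the id is unknown, -EBUSY if it is already
 * taken, and -EPERM if it is owned by a different slave.
 */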
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
{
struct res_common *r;
int err = -ENOENT;
int id = res_id;
if (type == RES_QP)
id &= 0x7fffff;
spin_lock(mlx4_tlock(dev));
r = find_res(dev, id, type);
if (r) {
*slave = r->owner;
err = 0;
}
spin_unlock(mlx4_tlock(dev));
return err;
}
static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
if (r) {
r->state = r->from_state;
r->func_name = "";
}
spin_unlock_irq(mlx4_tlock(dev));
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port);
static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
int counter_index)
{
struct res_common *r;
struct res_counter *counter;
int ret = 0;
if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
return ret;
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, counter_index, RES_COUNTER);
if (!r || r->owner != slave) {
ret = -EINVAL;
} else {
counter = container_of(r, struct res_counter, com);
if (!counter->port)
counter->port = port;
}
spin_unlock_irq(mlx4_tlock(dev));
return ret;
}
static int handle_unexisting_counter(struct mlx4_dev *dev,
struct mlx4_qp_context *qpc, u8 slave,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (port == counter->port) {
qpc->pri_path.counter_index = counter->com.res_id;
spin_unlock_irq(mlx4_tlock(dev));
return 0;
}
}
spin_unlock_irq(mlx4_tlock(dev));
/* No existing counter, need to allocate a new counter */
err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
port);
if (err == -ENOENT) {
err = 0;
} else if (err && err != -ENOSPC) {
mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
__func__, slave, err);
} else {
qpc->pri_path.counter_index = counter_idx;
mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
__func__, slave, qpc->pri_path.counter_index);
err = 0;
}
return err;
}
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
u8 slave, int port)
{
if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
return handle_existing_counter(dev, slave, port,
qpc->pri_path.counter_index);
return handle_unexisting_counter(dev, qpc, slave, port);
}
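/* The alloc_*_tr() helpers below create the tracker objects themselves: each
 * returns the embedded struct res_common initialised with the resource id
 * and the initial state for its type, and alloc_tr() dispatches on the type
 * and stamps the owning slave.
 */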
static struct res_common *alloc_qp_tr(int id)
{
struct res_qp *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_QP_RESERVED;
ret->local_qpn = id;
INIT_LIST_HEAD(&ret->mcg_list);
spin_lock_init(&ret->mcg_spl);
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_mtt_tr(int id, int order)
{
struct res_mtt *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->order = order;
ret->com.state = RES_MTT_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_mpt_tr(int id, int key)
{
struct res_mpt *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_MPT_RESERVED;
ret->key = key;
return &ret->com;
}
static struct res_common *alloc_eq_tr(int id)
{
struct res_eq *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_EQ_RESERVED;
return &ret->com;
}
static struct res_common *alloc_cq_tr(int id)
{
struct res_cq *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_CQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_srq_tr(int id)
{
struct res_srq *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_SRQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);
return &ret->com;
}
static struct res_common *alloc_counter_tr(int id, int port)
{
struct res_counter *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;
ret->port = port;
return &ret->com;
}
static struct res_common *alloc_xrcdn_tr(int id)
{
struct res_xrcdn *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_XRCD_ALLOCATED;
return &ret->com;
}
static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
struct res_fs_rule *ret;
ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->com.res_id = id;
ret->com.state = RES_FS_RULE_ALLOCATED;
ret->qpn = qpn;
return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
switch (type) {
case RES_QP:
ret = alloc_qp_tr(id);
break;
case RES_MPT:
ret = alloc_mpt_tr(id, extra);
break;
case RES_MTT:
ret = alloc_mtt_tr(id, extra);
break;
case RES_EQ:
ret = alloc_eq_tr(id);
break;
case RES_CQ:
ret = alloc_cq_tr(id);
break;
case RES_SRQ:
ret = alloc_srq_tr(id);
break;
case RES_MAC:
pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id, extra);
break;
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
case RES_FS_RULE:
ret = alloc_fs_rule_tr(id, extra);
break;
default:
return NULL;
}
if (ret)
ret->owner = slave;
return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
struct mlx4_counter *data)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *tmp;
struct res_counter *counter;
int *counters_arr;
int i = 0, err = 0;
memset(data, 0, sizeof(*data));
counters_arr = kmalloc_array(dev->caps.max_counters,
sizeof(*counters_arr), GFP_KERNEL);
if (!counters_arr)
return -ENOMEM;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(tmp,
&tracker->slave_list[slave].res_list[RES_COUNTER],
list) {
counter = container_of(tmp, struct res_counter, com);
if (counter->port == port) {
counters_arr[i] = (int)tmp->res_id;
i++;
}
}
spin_unlock_irq(mlx4_tlock(dev));
counters_arr[i] = -1;
i = 0;
while (counters_arr[i] != -1) {
err = mlx4_get_counter_stats(dev, counters_arr[i], data,
0);
if (err) {
memset(data, 0, sizeof(*data));
goto table_changed;
}
i++;
}
table_changed:
kfree(counters_arr);
return 0;
}
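/* add_res_range() - start tracking @count consecutive resources for @slave.
 *
 * Tracker entries are allocated outside the lock, then inserted both into
 * the per-type red-black tree and into the slave's resource list under
 * mlx4_tlock().  If an id is already tracked or an insertion fails, every
 * entry added so far is unwound and the whole operation fails.
 */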
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[type];
res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
if (!res_arr)
return -ENOMEM;
for (i = 0; i < count; ++i) {
res_arr[i] = alloc_tr(base + i, type, slave, extra);
if (!res_arr[i]) {
for (--i; i >= 0; --i)
kfree(res_arr[i]);
kfree(res_arr);
return -ENOMEM;
}
}
spin_lock_irq(mlx4_tlock(dev));
for (i = 0; i < count; ++i) {
if (find_res(dev, base + i, type)) {
err = -EEXIST;
goto undo;
}
err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
&tracker->slave_list[slave].res_list[type]);
}
spin_unlock_irq(mlx4_tlock(dev));
kfree(res_arr);
return 0;
undo:
for (--i; i >= 0; --i) {
rb_erase(&res_arr[i]->node, root);
list_del_init(&res_arr[i]->list);
}
spin_unlock_irq(mlx4_tlock(dev));
for (i = 0; i < count; ++i)
kfree(res_arr[i]);
kfree(res_arr);
return err;
}
static int remove_qp_ok(struct res_qp *res)
{
if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
!list_empty(&res->mcg_list)) {
pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
res->com.state, atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_QP_RESERVED) {
return -EPERM;
}
return 0;
}
static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
pr_devel("%s-%d: state %s, ref_count %d\n",
__func__, __LINE__,
mtt_states_str(res->com.state),
atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
else if (res->order != order)
return -EINVAL;
return 0;
}
static int remove_mpt_ok(struct res_mpt *res)
{
if (res->com.state == RES_MPT_BUSY)
return -EBUSY;
else if (res->com.state != RES_MPT_RESERVED)
return -EPERM;
return 0;
}
static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;
	return 0;
}
static int remove_counter_ok(struct res_counter *res)
{
if (res->com.state == RES_COUNTER_BUSY)
return -EBUSY;
else if (res->com.state != RES_COUNTER_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_xrcdn_ok(struct res_xrcdn *res)
{
if (res->com.state == RES_XRCD_BUSY)
return -EBUSY;
else if (res->com.state != RES_XRCD_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_fs_rule_ok(struct res_fs_rule *res)
{
if (res->com.state == RES_FS_RULE_BUSY)
return -EBUSY;
else if (res->com.state != RES_FS_RULE_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
return -EBUSY;
else if (res->com.state != RES_CQ_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_srq_ok(struct res_srq *res)
{
if (res->com.state == RES_SRQ_BUSY)
return -EBUSY;
else if (res->com.state != RES_SRQ_ALLOCATED)
return -EPERM;
return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
switch (type) {
case RES_QP:
return remove_qp_ok((struct res_qp *)res);
case RES_CQ:
return remove_cq_ok((struct res_cq *)res);
case RES_SRQ:
return remove_srq_ok((struct res_srq *)res);
case RES_MPT:
return remove_mpt_ok((struct res_mpt *)res);
case RES_MTT:
return remove_mtt_ok((struct res_mtt *)res, extra);
case RES_MAC:
return -EOPNOTSUPP;
case RES_EQ:
return remove_eq_ok((struct res_eq *)res);
case RES_COUNTER:
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
case RES_FS_RULE:
return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
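/* rem_res_range() - stop tracking @count consecutive resources.
 *
 * A first pass under the lock verifies that every id exists, is owned by
 * @slave and is in a state that allows removal (remove_ok()); only then does
 * a second pass erase the entries and free them, so the range is removed
 * either completely or not at all.
 */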
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
}
if (r->owner != slave) {
err = -EPERM;
goto out;
}
err = remove_ok(r, type, extra);
if (err)
goto out;
}
for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);
rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
err = 0;
out:
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
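/* The *_res_start_move_to() helpers implement the per-type state machines.
 * Each one validates that the requested transition is legal from the current
 * state and then parks the resource in its *_BUSY state with ->from_state
 * and ->to_state recorded; the caller completes the transition with
 * res_end_move() or rolls it back with res_abort_move().
 */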
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
enum res_qp_states state, struct res_qp **qp,
int alloc)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_qp *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_QP_BUSY:
mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
case RES_QP_RESERVED:
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
case RES_QP_MAPPED:
if ((r->com.state == RES_QP_RESERVED && alloc) ||
r->com.state == RES_QP_HW)
break;
else {
mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
break;
case RES_QP_HW:
if (r->com.state != RES_QP_MAPPED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
*qp = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_mpt_states state, struct res_mpt **mpt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_mpt *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_MPT_BUSY:
err = -EINVAL;
break;
case RES_MPT_RESERVED:
if (r->com.state != RES_MPT_MAPPED)
err = -EINVAL;
break;
case RES_MPT_MAPPED:
if (r->com.state != RES_MPT_RESERVED &&
r->com.state != RES_MPT_HW)
err = -EINVAL;
break;
case RES_MPT_HW:
if (r->com.state != RES_MPT_MAPPED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
*mpt = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_eq_states state, struct res_eq **eq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_eq *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
err = -EPERM;
else {
switch (state) {
case RES_EQ_BUSY:
err = -EINVAL;
break;
case RES_EQ_RESERVED:
if (r->com.state != RES_EQ_HW)
err = -EINVAL;
break;
case RES_EQ_HW:
if (r->com.state != RES_EQ_RESERVED)
err = -EINVAL;
break;
default:
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
}
}
spin_unlock_irq(mlx4_tlock(dev));
if (!err && eq)
*eq = r;
return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
enum res_cq_states state, struct res_cq **cq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_cq *r;
int err;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r) {
err = -ENOENT;
} else if (r->com.owner != slave) {
err = -EPERM;
} else if (state == RES_CQ_ALLOCATED) {
if (r->com.state != RES_CQ_HW)
err = -EINVAL;
else if (atomic_read(&r->ref_count))
err = -EBUSY;
else
err = 0;
} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
err = -EINVAL;
} else {
err = 0;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_CQ_BUSY;
if (cq)
*cq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_srq_states state, struct res_srq **srq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_srq *r;
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r) {
err = -ENOENT;
} else if (r->com.owner != slave) {
err = -EPERM;
} else if (state == RES_SRQ_ALLOCATED) {
if (r->com.state != RES_SRQ_HW)
err = -EINVAL;
else if (atomic_read(&r->ref_count))
err = -EBUSY;
} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
err = -EINVAL;
}
if (!err) {
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_SRQ_BUSY;
if (srq)
*srq = r;
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
}
static void res_end_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
return mlx4_is_qp_reserved(dev, qpn) &&
(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}
static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
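/* Per-type ALLOC_RES handlers.  For QPs, RES_OP_RESERVE grants quota and
 * reserves a QP range (count in the low 24 bits of the input parameter,
 * allocation flags in the top byte, alignment in the high dword) and returns
 * its base, while RES_OP_MAP_ICM moves a single QP to RES_QP_MAPPED and
 * backs it with ICM unless the QPN is FW-reserved.
 */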
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err;
int count;
int align;
int base;
int qpn;
u8 flags;
switch (op) {
case RES_OP_RESERVE:
count = get_param_l(&in_param) & 0xffffff;
/* Turn off all unsupported QP allocation flags that the
* slave tries to set.
*/
flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
align = get_param_h(&in_param);
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
if (err)
return err;
err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
return err;
}
err = add_res_range(dev, slave, base, count, RES_QP, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
return err;
}
set_param_l(out_param, base);
break;
case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
if (valid_reserved(dev, slave, qpn)) {
err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
if (err)
return err;
}
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
NULL, 1);
if (err)
return err;
if (!fw_reserved(dev, qpn)) {
err = __mlx4_qp_alloc_icm(dev, qpn);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
}
res_end_move(dev, slave, RES_QP, qpn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int base;
int order;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
order = get_param_l(&in_param);
err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
if (err)
return err;
base = __mlx4_alloc_mtt_range(dev, order);
if (base == -1) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
return -ENOMEM;
}
err = add_res_range(dev, slave, base, 1, RES_MTT, order);
if (err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
} else {
set_param_l(out_param, base);
}
return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int index;
int id;
struct res_mpt *mpt;
switch (op) {
case RES_OP_RESERVE:
err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
if (err)
break;
index = __mlx4_mpt_reserve(dev);
if (index == -1) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
break;
}
id = index & mpt_mask(dev);
err = add_res_range(dev, slave, id, 1, RES_MPT, index);
if (err) {
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
}
set_param_l(out_param, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_MAPPED, &mpt);
if (err)
return err;
err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
res_end_move(dev, slave, RES_MPT, id);
break;
}
return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int cqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
if (err)
break;
err = __mlx4_cq_alloc_icm(dev, &cqn);
if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
break;
}
err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
}
set_param_l(out_param, cqn);
break;
default:
err = -EINVAL;
}
return err;
}
static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int srqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
if (err)
break;
err = __mlx4_srq_alloc_icm(dev, &srqn);
if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
break;
}
err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err) {
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
}
set_param_l(out_param, srqn);
break;
default:
err = -EINVAL;
}
return err;
}
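/* MAC and VLAN registrations are not kept in the red-black tree; each slave
 * instead has a list of mac_res/vlan_res entries holding a reference count
 * per (address, port) pair.  rem_slave_macs() and rem_slave_vlans() drop all
 * remaining references when the slave is cleaned up.
 */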
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
u8 smac_index, u64 *mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->smac_index == smac_index && res->port == (u8) port) {
*mac = res->mac;
return 0;
}
}
return -ENOENT;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
/* mac found. update ref count */
++res->ref_count;
return 0;
}
}
if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
return -ENOMEM;
}
res->mac = mac;
res->port = (u8) port;
res->smac_index = smac_index;
res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);
return 0;
}
static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_MAC, 1, port);
kfree(res);
}
break;
}
}
}
static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;
int i;
list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
		/* dereference the MAC as many times as the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
kfree(res);
}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
int err = -EINVAL;
int port;
u64 mac;
u8 smac_index;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(dev, slave, port);
if (port < 0)
return -EINVAL;
mac = in_param;
err = __mlx4_register_mac(dev, port, mac);
if (err >= 0) {
smac_index = err;
set_param_l(out_param, err);
err = 0;
}
if (!err) {
err = mac_add_to_slave(dev, slave, mac, port, smac_index);
if (err)
__mlx4_unregister_mac(dev, port, mac);
}
return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
int port, int vlan_index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
/* vlan found. update ref count */
++res->ref_count;
return 0;
}
}
if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
return -EINVAL;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
return -ENOMEM;
}
res->vlan = vlan;
res->port = (u8) port;
res->vlan_index = vlan_index;
res->ref_count = 1;
list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_VLAN]);
return 0;
}
static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_VLAN,
1, port);
kfree(res);
}
break;
}
}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;
int i;
list_for_each_entry_safe(res, tmp, vlan_list, list) {
list_del(&res->list);
		/* dereference the VLAN as many times as the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_vlan(dev, res->port, res->vlan);
mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
kfree(res);
}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err;
u16 vlan;
int vlan_index;
int port;
port = !in_port ? get_param_l(out_param) : in_port;
if (!port || op != RES_OP_RESERVE_AND_MAP)
return -EINVAL;
	port = mlx4_slave_convert_port(dev, slave, port);
if (port < 0)
return -EINVAL;
/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
if (!in_port && port > 0 && port <= dev->caps.num_ports) {
slave_state[slave].old_vlan_api = true;
return 0;
}
vlan = (u16) in_param;
err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
if (!err) {
set_param_l(out_param, (u32) vlan_index);
err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
if (err)
__mlx4_unregister_vlan(dev, port, vlan);
}
return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port)
{
u32 index;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
if (err)
return err;
err = __mlx4_counter_alloc(dev, &index);
if (err) {
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
if (err) {
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
} else {
set_param_l(out_param, index);
}
return err;
}
static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
u32 xrcdn;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
err = __mlx4_xrcd_alloc(dev, &xrcdn);
if (err)
return err;
err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
if (err)
__mlx4_xrcd_free(dev, xrcdn);
else
set_param_l(out_param, xrcdn);
return err;
}
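/* mlx4_ALLOC_RES_wrapper() - dispatch a slave's ALLOC_RES command.
 *
 * The resource type comes from the low byte of the input modifier, the port
 * used by the MAC and VLAN handlers from the next byte, and the operation
 * itself (reserve, reserve-and-map, map ICM) from the op modifier.
 */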
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int alop = vhcr->op_modifier;
switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MTT:
err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MPT:
err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_CQ:
err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_SRQ:
err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MAC:
err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param, 0);
break;
case RES_XRCD:
err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param)
{
int err;
int count;
int base;
int qpn;
switch (op) {
case RES_OP_RESERVE:
base = get_param_l(&in_param) & 0x7fffff;
count = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, count, RES_QP, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);
break;
case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
NULL, 0);
if (err)
return err;
if (!fw_reserved(dev, qpn))
__mlx4_qp_free_icm(dev, qpn);
res_end_move(dev, slave, RES_QP, qpn);
if (valid_reserved(dev, slave, qpn))
err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int err = -EINVAL;
int base;
int order;
if (op != RES_OP_RESERVE_AND_MAP)
return err;
base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
if (!err) {
mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);
}
return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param)
{
int err = -EINVAL;
int index;
int id;
struct res_mpt *mpt;
switch (op) {
case RES_OP_RESERVE:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);
if (err)
break;
index = mpt->key;
put_res(dev, slave, id, RES_MPT);
err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_RESERVED, &mpt);
if (err)
return err;
__mlx4_mpt_free_icm(dev, mpt->key);
res_end_move(dev, slave, RES_MPT, id);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int cqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
cqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int srqn;
int err;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
srqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
if (err)
break;
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
{
int port;
int err = 0;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(dev, slave, port);
if (port < 0)
return -EINVAL;
mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
int err = 0;
	port = mlx4_slave_convert_port(dev, slave, port);
if (port < 0)
return -EINVAL;
switch (op) {
case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)
return 0;
if (!port)
return -EINVAL;
vlan_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_vlan(dev, port, in_param);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int index;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
index = get_param_l(&in_param);
if (index == MLX4_SINK_COUNTER_INDEX(dev))
return 0;
err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
if (err)
return err;
__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
return err;
}
static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
{
int xrcdn;
int err;
if (op != RES_OP_RESERVE)
return -EINVAL;
xrcdn = get_param_l(&in_param);
err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
if (err)
return err;
__mlx4_xrcd_free(dev, xrcdn);
return err;
}
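/* mlx4_FREE_RES_wrapper() - dispatch a slave's FREE_RES command, undoing the
 * corresponding ALLOC_RES: tracker entries are removed, the quota taken via
 * mlx4_grant_resource() is released and the underlying resource is freed.
 */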
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err = -EINVAL;
int alop = vhcr->op_modifier;
switch (vhcr->in_modifier & 0xFF) {
case RES_QP:
err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
break;
case RES_MTT:
err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MPT:
err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param);
break;
case RES_CQ:
err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_SRQ:
err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_MAC:
err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_VLAN:
err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);
break;
case RES_COUNTER:
err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
case RES_XRCD:
err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);
break;
default:
break;
}
return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
return (be32_to_cpu(mpt->flags) >> 9) & 1;
}
static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}
static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->mtt_sz);
}
static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}
static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}
static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}
static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}
static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
int page_shift = (qpc->log_page_size & 0x3f) + 12;
int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size;
int rq_size;
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
int tot;
	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
tot = (total_mem + (page_offset << 6)) >> page_shift;
total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
int size, struct res_mtt *mtt)
{
int res_start = mtt->com.res_id;
int res_size = (1 << mtt->order);
if (start < res_start || start + size > res_start + res_size)
return -EPERM;
return 0;
}
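/* Command wrappers for memory regions.  SW2HW_MPT validates the MPT entry a
 * slave hands in (no memory windows, no bind-enabled FMRs, PD bits matching
 * the slave) and pins the referenced MTT range before passing the command to
 * firmware; HW2SW_MPT and QUERY_MPT release or inspect that state.
 */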
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_mpt *mpt = NULL;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
int phys;
int id;
u32 pd;
int pd_slave;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
if (err)
return err;
/* Disable memory windows for VFs. */
if (!mr_is_region(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
/* Make sure that the PD bits related to the slave id are zeros. */
pd = mr_get_pd(inbox->buf);
pd_slave = (pd >> 17) & 0x7f;
if (pd_slave != 0 && --pd_slave != slave) {
err = -EPERM;
goto ex_abort;
}
if (mr_is_fmr(inbox->buf)) {
/* FMR and Bind Enable are forbidden in slave devices. */
if (mr_is_bind_enabled(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
/* FMR and Memory Windows are also forbidden. */
if (!mr_is_region(inbox->buf)) {
err = -EPERM;
goto ex_abort;
}
}
phys = mr_phys_mpt(inbox->buf);
if (!phys) {
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base,
mr_get_mtt_size(inbox->buf), mtt);
if (err)
goto ex_put;
mpt->mtt = mtt;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put;
if (!phys) {
atomic_inc(&mtt->ref_count);
put_res(dev, slave, mtt->com.res_id, RES_MTT);
}
res_end_move(dev, slave, RES_MPT, id);
return 0;
ex_put:
if (!phys)
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mpt *mpt;
int id;
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
res_end_move(dev, slave, RES_MPT, id);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_MPT, id);
return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier;
struct res_mpt *mpt;
int id;
id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);
if (err)
return err;
if (mpt->com.from_state == RES_MPT_MAPPED) {
/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
* that, the VF must read the MPT. But since the MPT entry memory is not
* in the VF's virtual memory space, it must use QUERY_MPT to obtain the
* entry contents. To guarantee that the MPT cannot be changed, the driver
* must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
* perform QUERY_MPT also when the entry is in SW ownership.
*/
struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
&mlx4_priv(dev)->mr_table.dmpt_table,
mpt->key, NULL);
		if (!mpt_entry || !outbox->buf) {
err = -EINVAL;
goto out;
}
memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
err = 0;
} else if (mpt->com.from_state == RES_MPT_HW) {
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
} else {
err = -EBUSY;
goto out;
}
out:
put_res(dev, slave, id, RES_MPT);
return err;
}
static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}
static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}
static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
struct mlx4_qp_context *context)
{
u32 qpn = vhcr->in_modifier & 0xffffff;
u32 qkey = 0;
if (mlx4_get_parav_qkey(dev, qpn, &qkey))
return;
/* adjust qkey in qp context */
context->qkey = cpu_to_be32(qkey);
}
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox);
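/* mlx4_RST2INIT_QP_wrapper() - validate and track a slave's RST2INIT command.
 *
 * Besides forwarding the command to firmware, the wrapper takes references
 * on every object the QP context points at (the MTT range, the receive and
 * send CQs and, when used, the SRQ) so that none of them can be destroyed
 * while the QP is in hardware ownership.
 */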
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_mtt *mtt;
struct res_qp *qp;
struct mlx4_qp_context *qpc = inbox->buf + 8;
int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
int mtt_size = qp_get_mtt_size(qpc);
struct res_cq *rcq;
struct res_cq *scq;
int rcqn = qp_get_rcqn(qpc);
int scqn = qp_get_scqn(qpc);
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
if (err)
return err;
qp->local_qpn = local_qpn;
qp->sched_queue = 0;
qp->param3 = 0;
qp->vlan_control = 0;
qp->fvl_rx = 0;
qp->pri_path_fl = 0;
qp->vlan_index = 0;
qp->feup = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
if (err)
goto ex_put_mtt;
err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
if (err)
goto ex_put_mtt;
if (scqn != rcqn) {
err = get_res(dev, slave, scqn, RES_CQ, &scq);
if (err)
goto ex_put_rcq;
} else
scq = rcq;
if (use_srq) {
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
goto ex_put_scq;
}
adjust_proxy_tun_qkey(dev, vhcr, qpc);
update_pkey_index(dev, slave, inbox);
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_srq;
atomic_inc(&mtt->ref_count);
qp->mtt = mtt;
atomic_inc(&rcq->ref_count);
qp->rcq = rcq;
atomic_inc(&scq->ref_count);
qp->scq = scq;
if (scqn != rcqn)
put_res(dev, slave, scqn, RES_CQ);
if (use_srq) {
atomic_inc(&srq->ref_count);
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
/* Save param3 for dynamic changes from VST back to VGT */
qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
return 0;
ex_put_srq:
if (use_srq)
put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
if (scqn != rcqn)
put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}
static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
int log_eq_size = eqc->log_eq_size & 0x1f;
int page_shift = (eqc->log_page_size & 0x3f) + 12;
if (log_eq_size + 5 < page_shift)
return 1;
return 1 << (log_eq_size + 5 - page_shift);
}
static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}
static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
int page_shift = (cqc->log_page_size & 0x3f) + 12;
if (log_cq_size + 5 < page_shift)
return 1;
return 1 << (log_cq_size + 5 - page_shift);
}
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int eqn = vhcr->in_modifier;
int res_id = (slave << 10) | eqn;
struct mlx4_eq_context *eqc = inbox->buf;
int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
int mtt_size = eq_get_mtt_size(eqc);
struct res_eq *eq;
struct res_mtt *mtt;
err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
if (err)
return err;
err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
if (err)
goto out_add;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto out_move;
err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
if (err)
goto out_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_put;
atomic_inc(&mtt->ref_count);
eq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_EQ, res_id);
return 0;
out_put:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
return err;
}
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
u8 get = vhcr->op_modifier;
if (get != 1)
return -EPERM;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
int len, struct res_mtt **res)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_mtt *mtt;
int err = -EINVAL;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
com.list) {
if (!check_mtt_range(dev, slave, start, len, mtt)) {
*res = mtt;
mtt->com.from_state = mtt->com.state;
mtt->com.state = RES_MTT_BUSY;
err = 0;
break;
}
}
spin_unlock_irq(mlx4_tlock(dev));
return err;
}
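/* verify_qp_parameters() - sanity-check a QP transition requested by a VF.
 *
 * VFs may not set a QP rate limit, their GID indices must fall within the
 * range assigned to them on the given port, and only VFs with SMI enabled
 * may bring up MLX (proxy special) QPs.
 */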
static int verify_qp_parameters(struct mlx4_dev *dev,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
enum qp_transition transition, u8 slave)
{
u32 qp_type;
u32 qpn;
struct mlx4_qp_context *qp_ctx;
enum mlx4_qp_optpar optpar;
int port;
int num_gids;
qp_ctx = inbox->buf + 8;
qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
if (slave != mlx4_master_func_num(dev)) {
qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
/* setting QP rate-limit is disallowed for VFs */
if (qp_ctx->rate_limit_params)
return -EPERM;
}
switch (qp_type) {
case MLX4_QP_ST_RC:
case MLX4_QP_ST_XRC:
case MLX4_QP_ST_UC:
switch (transition) {
case QP_TRANS_INIT2RTR:
case QP_TRANS_RTR2RTS:
case QP_TRANS_RTS2RTS:
case QP_TRANS_SQD2SQD:
case QP_TRANS_SQD2RTS:
if (slave != mlx4_master_func_num(dev)) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
else
num_gids = 1;
if (qp_ctx->pri_path.mgid_index >= num_gids)
return -EINVAL;
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
else
num_gids = 1;
if (qp_ctx->alt_path.mgid_index >= num_gids)
return -EINVAL;
}
}
break;
default:
break;
}
break;
case MLX4_QP_ST_MLX:
qpn = vhcr->in_modifier & 0x7fffff;
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (transition == QP_TRANS_INIT2RTR &&
slave != mlx4_master_func_num(dev) &&
mlx4_is_qp_reserved(dev, qpn) &&
!mlx4_vf_smi_enabled(dev, slave, port)) {
/* only enabled VFs may create MLX proxy QPs */
mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
__func__, slave, port);
return -EPERM;
}
break;
default:
break;
}
return 0;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_mtt mtt;
__be64 *page_list = inbox->buf;
u64 *pg_list = (u64 *)page_list;
int i;
struct res_mtt *rmtt = NULL;
int start = be64_to_cpu(page_list[0]);
int npages = vhcr->in_modifier;
int err;
err = get_containing_mtt(dev, slave, start, npages, &rmtt);
if (err)
return err;
/* Call the SW implementation of write_mtt:
* - Prepare a dummy mtt struct
* - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD: offset handling is broken, but the offset is not actually used here */
mtt.order = 0;
mtt.page_shift = 0;
for (i = 0; i < npages; ++i)
pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
((u64 *)page_list + 2));
if (rmtt)
put_res(dev, slave, rmtt->com.res_id, RES_MTT);
return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
if (err)
return err;
err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
if (err)
goto ex_abort;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put;
atomic_dec(&eq->mtt->ref_count);
put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_EQ, res_id);
rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
return 0;
ex_put:
put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_EQ, res_id);
return err;
}
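/* mlx4_GEN_EQE() - inject an event into a slave's event queue.
 *
 * The event is delivered only if the slave is valid, active and has
 * registered an EQ for this event type; the EQE is copied into a mailbox and
 * forwarded to firmware with the GEN_EQE command while the slave's EQ is
 * held busy via get_res().
 */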
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_event_eq_info *event_eq;
struct mlx4_cmd_mailbox *mailbox;
u32 in_modifier = 0;
int err;
int res_id;
struct res_eq *req;
if (!priv->mfunc.master.slave_state)
return -EINVAL;
/* check for slave valid, slave not PF, and slave active */
if (slave < 0 || slave > dev->persist->num_vfs ||
slave == dev->caps.function ||
!priv->mfunc.master.slave_state[slave].active)
return 0;
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */
if (event_eq->eqn < 0)
return 0;
mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
res_id = (slave << 10) | event_eq->eqn;
err = get_res(dev, slave, res_id, RES_EQ, &req);
if (err)
goto unlock;
if (req->com.from_state != RES_EQ_HW) {
err = -EINVAL;
goto put;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto put;
}
if (eqe->type == MLX4_EVENT_TYPE_CMD) {
++event_eq->token;
eqe->event.cmd.token = cpu_to_be16(event_eq->token);
}
memcpy(mailbox->buf, (u8 *) eqe, 28);
in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
put_res(dev, slave, res_id, RES_EQ);
mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
put:
put_res(dev, slave, res_id, RES_EQ);
unlock:
mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int eqn = vhcr->in_modifier;
int res_id = eqn | (slave << 10);
struct res_eq *eq;
int err;
err = get_res(dev, slave, res_id, RES_EQ, &eq);
if (err)
return err;
if (eq->com.from_state != RES_EQ_HW) {
err = -EINVAL;
goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, res_id, RES_EQ);
return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int cqn = vhcr->in_modifier;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
struct res_cq *cq = NULL;
struct res_mtt *mtt;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
if (err)
return err;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto out_move;
err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
if (err)
goto out_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_put;
atomic_inc(&mtt->ref_count);
cq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_CQ, cqn);
return 0;
out_put:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
res_abort_move(dev, slave, RES_CQ, cqn);
return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int cqn = vhcr->in_modifier;
struct res_cq *cq = NULL;
err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto out_move;
atomic_dec(&cq->mtt->ref_count);
res_end_move(dev, slave, RES_CQ, cqn);
return 0;
out_move:
res_abort_move(dev, slave, RES_CQ, cqn);
return err;
}
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int cqn = vhcr->in_modifier;
struct res_cq *cq;
int err;
err = get_res(dev, slave, cqn, RES_CQ, &cq);
if (err)
return err;
if (cq->com.from_state != RES_CQ_HW)
goto ex_put;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, cqn, RES_CQ);
return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd,
struct res_cq *cq)
{
int err;
struct res_mtt *orig_mtt;
struct res_mtt *mtt;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
if (err)
return err;
if (orig_mtt != cq->mtt) {
err = -EINVAL;
goto ex_put;
}
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_put;
err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
if (err)
goto ex_put1;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put1;
atomic_dec(&orig_mtt->ref_count);
put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
atomic_inc(&mtt->ref_count);
cq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
return 0;
ex_put1:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int cqn = vhcr->in_modifier;
struct res_cq *cq;
int err;
err = get_res(dev, slave, cqn, RES_CQ, &cq);
if (err)
return err;
if (cq->com.from_state != RES_CQ_HW)
goto ex_put;
if (vhcr->op_modifier == 0) {
err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
put_res(dev, slave, cqn, RES_CQ);
return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
int log_rq_stride = srqc->logstride & 7;
int page_shift = (srqc->log_page_size & 0x3f) + 12;
if (log_srq_size + log_rq_stride + 4 < page_shift)
return 1;
return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_srq *srq = NULL;
struct mlx4_srq_context *srqc = inbox->buf;
int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
return -EINVAL;
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
if (err)
return err;
err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
if (err)
goto ex_abort;
err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
mtt);
if (err)
goto ex_put_mtt;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_put_mtt;
atomic_inc(&mtt->ref_count);
srq->mtt = mtt;
put_res(dev, slave, mtt->com.res_id, RES_MTT);
res_end_move(dev, slave, RES_SRQ, srqn);
return 0;
ex_put_mtt:
put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
res_abort_move(dev, slave, RES_SRQ, srqn);
return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq = NULL;
err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
atomic_dec(&srq->mtt->ref_count);
if (srq->cq)
atomic_dec(&srq->cq->ref_count);
res_end_move(dev, slave, RES_SRQ, srqn);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_SRQ, srqn);
return err;
}
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq;
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
return err;
if (srq->com.from_state != RES_SRQ_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, srqn, RES_SRQ);
return err;
}
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq;
err = get_res(dev, slave, srqn, RES_SRQ, &srq);
if (err)
return err;
if (srq->com.from_state != RES_SRQ_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, srqn, RES_SRQ);
return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
return err;
if (qp->com.from_state != RES_QP_HW) {
err = -EBUSY;
goto out;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
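/* Bit 6 of sched_queue selects the physical port. Rewrite the port number
 * the VF used (1 or 2) into the physical port mapped to this slave, for
 * the primary path and, when the ALT_ADDR_PATH optpar is set, for the
 * alternate path as well.
 */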
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox)
{
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
u8 pri_sched_queue;
int port = mlx4_slave_convert_port(
dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
if (port < 0)
return -EINVAL;
pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
((port & 1) << 6);
if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
qpc->pri_path.sched_queue = pri_sched_queue;
}
if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = mlx4_slave_convert_port(
dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+ 1) - 1;
if (port < 0)
return -EINVAL;
qpc->alt_path.sched_queue =
(qpc->alt_path.sched_queue & ~(1 << 6)) |
(port & 1) << 6;
}
return 0;
}
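/* For Ethernet (RoCE) QPs, verify that the source-MAC index programmed in
 * the primary path resolves to a MAC registered to this slave on the
 * chosen port; otherwise the state transition is rejected.
 */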
static int roce_verify_mac(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox)
{
u64 mac;
int port;
u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
u8 sched = *(u8 *)(inbox->buf + 64);
u8 smac_ix;
port = (sched >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
return -ENOENT;
}
return 0;
}
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *qpc = inbox->buf + 8;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
u8 orig_vlan_index = qpc->pri_path.vlan_index;
u8 orig_feup = qpc->pri_path.feup;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
if (err)
return err;
if (roce_verify_mac(dev, slave, qpc, inbox))
return -EINVAL;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, qpc);
orig_sched_queue = qpc->pri_path.sched_queue;
err = get_res(dev, slave, qpn, RES_QP, &qp);
if (err)
return err;
if (qp->com.from_state != RES_QP_HW) {
err = -EBUSY;
goto out;
}
err = update_vport_qp_param(dev, inbox, slave, qpn);
if (err)
goto out;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
/* if no error, save sched queue value passed in by VF. This is
* essentially the QOS value provided by the VF. This will be useful
* if we allow dynamic changes from VST back to VGT
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
qp->vlan_index = orig_vlan_index;
qp->feup = orig_feup;
}
put_res(dev, slave, qpn, RES_QP);
return err;
}
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
if (err)
return err;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
if (err)
return err;
update_pkey_index(dev, slave, inbox);
update_gid(dev, inbox, (u8)slave);
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp_context *context = inbox->buf + 8;
int err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_gid(dev, inbox, (u8)slave);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct mlx4_qp_context *context = inbox->buf + 8;
err = adjust_qp_sched_queue(dev, slave, context, inbox);
if (err)
return err;
err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
if (err)
return err;
adjust_proxy_tun_qkey(dev, vhcr, context);
update_gid(dev, inbox, (u8)slave);
update_pkey_index(dev, slave, inbox);
return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
if (err)
goto ex_abort;
atomic_dec(&qp->mtt->ref_count);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
if (qp->srq)
atomic_dec(&qp->srq->ref_count);
res_end_move(dev, slave, RES_QP, qpn);
return 0;
ex_abort:
res_abort_move(dev, slave, RES_QP, qpn);
return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
struct res_qp *rqp, u8 *gid)
{
struct res_gid *res;
list_for_each_entry(res, &rqp->mcg_list, list) {
if (!memcmp(res->gid, gid, 16))
return res;
}
return NULL;
}
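/* Track a multicast GID attached to this QP so the attachment can be
 * reversed on detach or when the slave's resources are cleaned up.
 * Duplicate attachments of the same GID are rejected with -EEXIST.
 */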
static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
enum mlx4_steer_type steer, u64 reg_id)
{
struct res_gid *res;
int err;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
spin_lock_irq(&rqp->mcg_spl);
if (find_gid(dev, slave, rqp, gid)) {
kfree(res);
err = -EEXIST;
} else {
memcpy(res->gid, gid, 16);
res->prot = prot;
res->steer = steer;
res->reg_id = reg_id;
list_add_tail(&res->list, &rqp->mcg_list);
err = 0;
}
spin_unlock_irq(&rqp->mcg_spl);
return err;
}
static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
u8 *gid, enum mlx4_protocol prot,
enum mlx4_steer_type steer, u64 *reg_id)
{
struct res_gid *res;
int err;
spin_lock_irq(&rqp->mcg_spl);
res = find_gid(dev, slave, rqp, gid);
if (!res || res->prot != prot || res->steer != steer)
err = -EINVAL;
else {
*reg_id = res->reg_id;
list_del(&res->list);
kfree(res);
err = 0;
}
spin_unlock_irq(&rqp->mcg_spl);
return err;
}
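/* Attach the QP to a steering group. With device-managed flow steering the
 * GID's byte 5 carries the port, which is translated to the slave's
 * physical port before building the flow rule; with B0 steering the port
 * byte is rewritten in place for Ethernet and the common attach is used.
 */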
static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
u8 gid[16], int block_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
int port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (port < 0)
return port;
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_loopback, prot,
reg_id);
}
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH) {
int port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (port < 0)
return port;
gid[5] = port;
}
return mlx4_qp_attach_common(dev, qp, gid,
block_loopback, prot, type);
default:
return -EINVAL;
}
}
static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot,
enum mlx4_steer_type type, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
return mlx4_flow_detach(dev, reg_id);
case MLX4_STEERING_MODE_B0:
return mlx4_qp_detach_common(dev, qp, gid, prot, type);
default:
return -EINVAL;
}
}
static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
u8 *gid, enum mlx4_protocol prot)
{
int real_port;
if (prot != MLX4_PROT_ETH)
return 0;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
if (real_port < 0)
return -EINVAL;
gid[5] = real_port;
}
return 0;
}
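/* Attach (op_modifier set) or detach a multicast GID for a VF's QP,
 * keeping the per-QP GID list in sync so that leftover attachments can be
 * removed when the slave goes away.
 */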
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_qp qp; /* dummy for calling attach/detach */
u8 *gid = inbox->buf;
enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
int err;
int qpn;
struct res_qp *rqp;
u64 reg_id = 0;
int attach = vhcr->op_modifier;
int block_loopback = vhcr->in_modifier >> 31;
u8 steer_type_mask = 2;
enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
qpn = vhcr->in_modifier & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
qp.qpn = qpn;
if (attach) {
err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
type, &reg_id);
if (err) {
pr_err("Fail to attach rule to qp 0x%x\n", qpn);
goto ex_put;
}
err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
if (err)
goto ex_detach;
} else {
err = mlx4_adjust_port(dev, slave, gid, prot);
if (err)
goto ex_put;
err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
if (err)
goto ex_put;
err = qp_detach(dev, &qp, gid, prot, type, reg_id);
if (err)
pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
qpn, reg_id);
}
put_res(dev, slave, qpn, RES_QP);
return err;
ex_detach:
qp_detach(dev, &qp, gid, prot, type, reg_id);
ex_put:
put_res(dev, slave, qpn, RES_QP);
return err;
}
/*
* MAC validation for Flow Steering rules.
* A VF may attach rules only with a MAC address that is assigned to it.
*/
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
struct list_head *rlist)
{
struct mac_res *res, *tmp;
__be64 be_mac;
/* make sure it isn't a multicast or broadcast MAC */
if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
!is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
list_for_each_entry_safe(res, tmp, rlist, list) {
be_mac = cpu_to_be64(res->mac << 16);
if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
return 0;
}
pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
eth_header->eth.dst_mac, slave);
return -EINVAL;
}
return 0;
}
/*
* If the Ethernet header is missing, insert one that uses a MAC address
* assigned to the VF.
*/
static int add_eth_header(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct list_head *rlist, int header_id)
{
struct mac_res *res, *tmp;
u8 port;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct mlx4_net_trans_rule_hw_eth *eth_header;
struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
__be64 be_mac = 0;
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
port = ctrl->port;
eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
/* Clear a space in the inbox for eth header */
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_IPV4:
ip_header =
(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
memmove(ip_header, eth_header,
sizeof(*ip_header) + sizeof(*l4_header));
break;
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
(eth_header + 1);
memmove(l4_header, eth_header, sizeof(*l4_header));
break;
default:
return -EINVAL;
}
list_for_each_entry_safe(res, tmp, rlist, list) {
if (port == res->port) {
be_mac = cpu_to_be64(res->mac << 16);
break;
}
}
if (!be_mac) {
pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
memset(eth_header, 0, sizeof(*eth_header));
eth_header->size = sizeof(*eth_header) >> 2;
eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
return 0;
}
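/* A VF may use UPDATE_QP only to change the source-MAC index and the
 * multicast-loopback source check of the primary address path; any other
 * mask bit is rejected with -EPERM.
 */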
#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd_info)
{
int err;
u32 qpn = vhcr->in_modifier & 0xffffff;
struct res_qp *rqp;
u64 mac;
unsigned port;
u64 pri_addr_path_mask;
struct mlx4_update_qp_context *cmd;
int smac_index;
cmd = (struct mlx4_update_qp_context *)inbox->buf;
pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
(pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
return -EPERM;
if ((pri_addr_path_mask &
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
!(dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
slave);
return -EOPNOTSUPP;
}
/* Just change the smac for the QP */
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
return err;
}
port = (rqp->sched_queue >> 6 & 1) + 1;
if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
smac_index = cmd->qp_context.pri_path.grh_mylmc;
err = mac_find_smac_ix_in_slave(dev, slave, port,
smac_index, &mac);
if (err) {
mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
qpn, smac_index);
goto err_mac;
}
}
err = mlx4_cmd(dev, inbox->dma,
vhcr->in_modifier, 0,
MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
goto err_mac;
}
err_mac:
put_res(dev, slave, qpn, RES_QP);
return err;
}
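/* Size of a flow-steering attach mailbox: the fixed control segment plus
 * every variable-sized rule header that follows (header sizes are in
 * dwords); a zero-sized header terminates the list.
 */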
static u32 qp_attach_mbox_size(void *mbox)
{
u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
struct _rule_hw *rule_header;
rule_header = (struct _rule_hw *)(mbox + size);
while (rule_header->size) {
size += rule_header->size * sizeof(u32);
rule_header += 1;
}
return size;
}
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
int err;
int qpn;
struct res_qp *rqp;
struct mlx4_net_trans_rule_hw_ctrl *ctrl;
struct _rule_hw *rule_header;
int header_id;
struct res_fs_rule *rrule;
u32 mbox_size;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
err = mlx4_slave_convert_port(dev, slave, ctrl->port);
if (err <= 0)
return -EINVAL;
ctrl->port = err;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
err = -EINVAL;
goto err_put_qp;
}
break;
case MLX4_NET_TRANS_RULE_ID_IB:
break;
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put_qp;
}
vhcr->in_modifier +=
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put_qp;
}
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto err_put_qp;
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
mlx4_err(dev, "Fail to add flow steering resources\n");
goto err_detach;
}
err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
if (err)
goto err_detach;
mbox_size = qp_attach_mbox_size(inbox->buf);
rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
if (!rrule->mirr_mbox) {
err = -ENOMEM;
goto err_put_rule;
}
rrule->mirr_mbox_size = mbox_size;
rrule->mirr_rule_id = 0;
memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
/* set different port */
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
if (ctrl->port == 1)
ctrl->port = 2;
else
ctrl->port = 1;
if (mlx4_is_bonded(dev))
mlx4_do_mirror_rule(dev, rrule);
atomic_inc(&rqp->ref_count);
err_put_rule:
put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
err_detach:
/* detach rule on error */
if (err)
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
err_put_qp:
put_res(dev, slave, qpn, RES_QP);
return err;
}
static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
int err;
err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
return err;
}
mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
return 0;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
struct res_qp *rqp;
struct res_fs_rule *rrule;
u64 mirr_reg_id;
int qpn;
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
return -EOPNOTSUPP;
err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
if (err)
return err;
if (!rrule->mirr_mbox) {
mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
return -EINVAL;
}
mirr_reg_id = rrule->mirr_rule_id;
kfree(rrule->mirr_mbox);
qpn = rrule->qpn;
/* Release the rule from busy state before removal */
put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err)
return err;
if (mirr_reg_id && mlx4_is_bonded(dev)) {
err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
if (err) {
mlx4_err(dev, "Fail to get resource of mirror rule\n");
} else {
put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
mlx4_undo_mirror_rule(dev, rrule);
}
}
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (!err)
atomic_dec(&rqp->ref_count);
out:
put_res(dev, slave, qpn, RES_QP);
return err;
}
enum {
BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err;
int index = vhcr->in_modifier & 0xffff;
err = get_res(dev, slave, index, RES_COUNTER, NULL);
if (err)
return err;
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
put_res(dev, slave, index, RES_COUNTER);
return err;
}
static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
struct res_gid *rgid;
struct res_gid *tmp;
struct mlx4_qp qp; /* dummy for calling attach/detach */
list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
mlx4_flow_detach(dev, rgid->reg_id);
break;
case MLX4_STEERING_MODE_B0:
qp.qpn = rqp->local_qpn;
(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
rgid->prot, rgid->steer);
break;
}
list_del(&rgid->list);
kfree(rgid);
}
}
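/* Mark every resource of @type owned by @slave as busy and flag it for
 * removal so that no command wrapper can grab it; return how many
 * resources were already busy (still held by a command in flight).
 */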
static int _move_all_busy(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int print)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
struct res_common *r;
struct res_common *tmp;
int busy;
busy = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(r, tmp, rlist, list) {
if (r->owner == slave) {
if (!r->removing) {
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
resource_str(type),
r->res_id);
++busy;
} else {
r->from_state = r->state;
r->state = RES_ANY_BUSY;
r->removing = 1;
}
}
}
}
spin_unlock_irq(mlx4_tlock(dev));
return busy;
}
static int move_all_busy(struct mlx4_dev *dev, int slave,
enum mlx4_resource type)
{
unsigned long begin;
int busy;
begin = jiffies;
do {
busy = _move_all_busy(dev, slave, type, 0);
if (time_after(jiffies, begin + 5 * HZ))
break;
if (busy)
cond_resched();
} while (busy);
if (busy)
busy = _move_all_busy(dev, slave, type, 1);
return busy;
}
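/* Tear down all QPs still owned by a departing slave: detach any multicast
 * attachments, then unwind each QP through its states
 * (HW -> MAPPED -> RESERVED -> freed), issuing 2RST, freeing ICM and the
 * reserved range, and dropping the CQ/SRQ/MTT reference counts that the
 * SW2HW path took.
 */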
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *qp_list =
&tracker->slave_list[slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
int state;
u64 in_param;
int qpn;
int err;
err = move_all_busy(dev, slave, RES_QP);
if (err)
mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (qp->com.owner == slave) {
qpn = qp->com.res_id;
detach_qp(dev, slave, qp);
state = qp->com.from_state;
while (state != 0) {
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&qp->com.node,
&tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
if (!valid_reserved(dev, slave, qpn)) {
__mlx4_qp_release_range(dev, qpn, 1);
mlx4_release_resource(dev, slave,
RES_QP, 1, 0);
}
kfree(qp);
state = 0;
break;
case RES_QP_MAPPED:
if (!valid_reserved(dev, slave, qpn))
__mlx4_qp_free_icm(dev, qpn);
state = RES_QP_RESERVED;
break;
case RES_QP_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param,
qp->local_qpn, 2,
MLX4_CMD_2RST_QP,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
if (qp->srq)
atomic_dec(&qp->srq->ref_count);
state = RES_QP_MAPPED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *srq_list =
&tracker->slave_list[slave].res_list[RES_SRQ];
struct res_srq *srq;
struct res_srq *tmp;
int state;
u64 in_param;
int srqn;
int err;
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (srq->com.owner == slave) {
srqn = srq->com.res_id;
state = srq->com.from_state;
while (state != 0) {
switch (state) {
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&srq->com.node,
&tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_SRQ, 1, 0);
kfree(srq);
state = 0;
break;
case RES_SRQ_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, srqn, 1,
MLX4_CMD_HW2SW_SRQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
if (srq->cq)
atomic_dec(&srq->cq->ref_count);
state = RES_SRQ_ALLOCATED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *cq_list =
&tracker->slave_list[slave].res_list[RES_CQ];
struct res_cq *cq;
struct res_cq *tmp;
int state;
u64 in_param;
int cqn;
int err;
err = move_all_busy(dev, slave, RES_CQ);
if (err)
mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
cqn = cq->com.res_id;
state = cq->com.from_state;
while (state != 0) {
switch (state) {
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&cq->com.node,
&tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_CQ, 1, 0);
kfree(cq);
state = 0;
break;
case RES_CQ_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, cqn, 1,
MLX4_CMD_HW2SW_CQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mpt_list =
&tracker->slave_list[slave].res_list[RES_MPT];
struct res_mpt *mpt;
struct res_mpt *tmp;
int state;
u64 in_param;
int mptn;
int err;
err = move_all_busy(dev, slave, RES_MPT);
if (err)
mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (mpt->com.owner == slave) {
mptn = mpt->com.res_id;
state = mpt->com.from_state;
while (state != 0) {
switch (state) {
case RES_MPT_RESERVED:
__mlx4_mpt_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mpt->com.node,
&tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave,
RES_MPT, 1, 0);
kfree(mpt);
state = 0;
break;
case RES_MPT_MAPPED:
__mlx4_mpt_free_icm(dev, mpt->key);
state = RES_MPT_RESERVED;
break;
case RES_MPT_HW:
in_param = slave;
err = mlx4_cmd(dev, in_param, mptn, 0,
MLX4_CMD_HW2SW_MPT,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
state = RES_MPT_MAPPED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *mtt_list =
&tracker->slave_list[slave].res_list[RES_MTT];
struct res_mtt *mtt;
struct res_mtt *tmp;
int state;
int base;
int err;
err = move_all_busy(dev, slave, RES_MTT);
if (err)
mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (mtt->com.owner == slave) {
base = mtt->com.res_id;
state = mtt->com.from_state;
while (state != 0) {
switch (state) {
case RES_MTT_ALLOCATED:
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&mtt->com.node,
&tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
mlx4_release_resource(dev, slave, RES_MTT,
1 << mtt->order, 0);
kfree(mtt);
state = 0;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
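/* Replay the saved attach mailbox (already rewritten to point at the other
 * port) so the flow rule is mirrored when the two ports are bonded;
 * remember the mirrored rule's reg_id so it can be undone on unbond or
 * when the original rule is detached.
 */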
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
struct res_fs_rule *mirr_rule;
u64 reg_id;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (!fs_rule->mirr_mbox) {
mlx4_err(dev, "rule mirroring mailbox is null\n");
mlx4_free_cmd_mailbox(dev, mailbox);
return -EINVAL;
}
memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err;
err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
if (err)
goto err_detach;
err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
if (err)
goto err_rem;
fs_rule->mirr_rule_id = reg_id;
mirr_rule->mirr_rule_id = 0;
mirr_rule->mirr_mbox_size = 0;
mirr_rule->mirr_mbox = NULL;
put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
return 0;
err_rem:
rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
return err;
}
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
struct rb_node *p;
struct res_fs_rule *fs_rule;
int err = 0;
LIST_HEAD(mirr_list);
for (p = rb_first(root); p; p = rb_next(p)) {
fs_rule = rb_entry(p, struct res_fs_rule, com.node);
if ((bond && fs_rule->mirr_mbox_size) ||
(!bond && !fs_rule->mirr_mbox_size))
list_add_tail(&fs_rule->mirr_list, &mirr_list);
}
list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
if (bond)
err += mlx4_do_mirror_rule(dev, fs_rule);
else
err += mlx4_undo_mirror_rule(dev, fs_rule);
}
return err;
}
int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
return mlx4_mirror_fs_rules(dev, true);
}
int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
return mlx4_mirror_fs_rules(dev, false);
}
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker =
&priv->mfunc.master.res_tracker;
struct list_head *fs_rule_list =
&tracker->slave_list[slave].res_list[RES_FS_RULE];
struct res_fs_rule *fs_rule;
struct res_fs_rule *tmp;
int state;
u64 base;
int err;
err = move_all_busy(dev, slave, RES_FS_RULE);
if (err)
mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (fs_rule->com.owner == slave) {
base = fs_rule->com.res_id;
state = fs_rule->com.from_state;
while (state != 0) {
switch (state) {
case RES_FS_RULE_ALLOCATED:
/* detach rule */
err = mlx4_cmd(dev, base, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&fs_rule->com.node,
&tracker->res_tree[RES_FS_RULE]);
list_del(&fs_rule->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(fs_rule->mirr_mbox);
kfree(fs_rule);
state = 0;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *eq_list =
&tracker->slave_list[slave].res_list[RES_EQ];
struct res_eq *eq;
struct res_eq *tmp;
int err;
int state;
int eqn;
err = move_all_busy(dev, slave, RES_EQ);
if (err)
mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (eq->com.owner == slave) {
eqn = eq->com.res_id;
state = eq->com.from_state;
while (state != 0) {
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
rb_erase(&eq->com.node,
&tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
state = 0;
break;
case RES_EQ_HW:
err = mlx4_cmd(dev, slave, eqn & 0x3ff,
1, MLX4_CMD_HW2SW_EQ,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
slave, eqn & 0x3ff);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
break;
default:
state = 0;
}
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
}
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *counter_list =
&tracker->slave_list[slave].res_list[RES_COUNTER];
struct res_counter *counter;
struct res_counter *tmp;
int err;
int *counters_arr = NULL;
int i, j;
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
slave);
counters_arr = kmalloc_array(dev->caps.max_counters,
sizeof(*counters_arr), GFP_KERNEL);
if (!counters_arr)
return;
do {
i = 0;
j = 0;
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
counters_arr[i++] = counter->com.res_id;
rb_erase(&counter->com.node,
&tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
}
}
spin_unlock_irq(mlx4_tlock(dev));
while (j < i) {
__mlx4_counter_free(dev, counters_arr[j++]);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
}
} while (i);
kfree(counters_arr);
}
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *xrcdn_list =
&tracker->slave_list[slave].res_list[RES_XRCD];
struct res_xrcdn *xrcd;
struct res_xrcdn *tmp;
int err;
int xrcdn;
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
}
}
spin_unlock_irq(mlx4_tlock(dev));
}
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_reset_roce_gids(dev, slave);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
rem_slave_vlans(dev, slave);
rem_slave_macs(dev, slave);
rem_slave_fs_rule(dev, slave);
rem_slave_qps(dev, slave);
rem_slave_srqs(dev, slave);
rem_slave_cqs(dev, slave);
rem_slave_mrs(dev, slave);
rem_slave_eqs(dev, slave);
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
struct mlx4_vf_immed_vlan_work *work)
{
ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
ctx->qp_context.qos_vport = work->qos_vport;
}
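/* Worker that applies an immediate VLAN/QoS change for a VF: walk the
 * slave's Ethernet QPs that have gone through INIT2RTR and issue UPDATE_QP
 * on each, either forcing the administratively set VLAN, priority and
 * VLAN-control bits (VST) or restoring the values the VF originally
 * programmed (VGT, work->vlan_id == MLX4_VGT).
 */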
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
struct mlx4_vf_immed_vlan_work *work =
container_of(_work, struct mlx4_vf_immed_vlan_work, work);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_update_qp_context *upd_context;
struct mlx4_dev *dev = &work->priv->dev;
struct mlx4_resource_tracker *tracker =
&work->priv->mfunc.master.res_tracker;
struct list_head *qp_list =
&tracker->slave_list[work->slave].res_list[RES_QP];
struct res_qp *qp;
struct res_qp *tmp;
u64 qp_path_mask_vlan_ctrl =
((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
int err;
int port, errors = 0;
u8 vlan_control;
if (mlx4_is_slave(dev)) {
mlx4_warn(dev, "Trying to update-qp in slave %d\n",
work->slave);
goto out;
}
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
goto out;
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
else if (work->vlan_proto == htons(ETH_P_8021AD))
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
upd_context = mailbox->buf;
upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
spin_unlock_irq(mlx4_tlock(dev));
if (qp->com.owner == work->slave) {
if (qp->com.from_state != RES_QP_HW ||
!qp->sched_queue || /* no INIT2RTR trans yet */
mlx4_is_qp_reserved(dev, qp->local_qpn) ||
qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
spin_lock_irq(mlx4_tlock(dev));
continue;
}
port = (qp->sched_queue >> 6 & 1) + 1;
if (port != work->port) {
spin_lock_irq(mlx4_tlock(dev));
continue;
}
if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
else
upd_context->primary_addr_path_mask =
cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
if (work->vlan_id == MLX4_VGT) {
upd_context->qp_context.param3 = qp->param3;
upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
upd_context->qp_context.pri_path.feup = qp->feup;
upd_context->qp_context.pri_path.sched_queue =
qp->sched_queue;
} else {
upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
upd_context->qp_context.pri_path.vlan_control = vlan_control;
upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
if (work->vlan_proto == htons(ETH_P_8021AD))
upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
else
upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
qp->sched_queue & 0xC7;
upd_context->qp_context.pri_path.sched_queue |=
((work->qos & 0x7) << 3);
if (dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_QOS_VPP)
update_qos_vpp(upd_context, work);
}
err = mlx4_cmd(dev, mailbox->dma,
qp->local_qpn & 0xffffff,
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
work->slave, port, qp->local_qpn, err);
errors++;
}
}
spin_lock_irq(mlx4_tlock(dev));
}
spin_unlock_irq(mlx4_tlock(dev));
mlx4_free_cmd_mailbox(dev, mailbox);
if (errors)
mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
errors, work->slave, work->port);
/* unregister previous vlan_id if needed and we had no errors
* while updating the QPs
*/
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
NO_INDX != work->orig_vlan_ix)
__mlx4_unregister_vlan(&work->priv->dev, work->port,
work->orig_vlan_id);
out:
kfree(work);
return;
}
| linux-master | drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
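/* Build a minimal self-addressed frame (destination MAC = our own MAC, ARP
 * ethertype, incrementing payload bytes) and transmit it; the packet is
 * expected to loop back and set priv->loopback_ok, which is polled by
 * mlx4_en_test_loopback() below.
 */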
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
{
struct sk_buff *skb;
struct ethhdr *ethh;
unsigned char *packet;
unsigned int packet_size = MLX4_LOOPBACK_TEST_PAYLOAD;
unsigned int i;
int err;
/* build the pkt before xmit */
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
if (!skb)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
ethh = skb_put(skb, sizeof(struct ethhdr));
packet = skb_put(skb, packet_size);
memcpy(ethh->h_dest, priv->dev->dev_addr, ETH_ALEN);
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
for (i = 0; i < packet_size; ++i) /* fill our packet */
packet[i] = (unsigned char)(i & 0xff);
/* xmit the pkt */
err = mlx4_en_xmit(skb, priv->dev);
return err;
}
static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
{
u32 loopback_ok = 0;
int i;
priv->loopback_ok = 0;
priv->validate_loopback = 1;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
goto mlx4_en_test_loopback_exit;
}
/* polling for result */
for (i = 0; i < MLX4_EN_LOOPBACK_RETRIES; ++i) {
msleep(MLX4_EN_LOOPBACK_TIMEOUT);
if (priv->loopback_ok) {
loopback_ok = 1;
break;
}
}
if (!loopback_ok)
en_err(priv, "Loopback packet didn't arrive\n");
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
int i = 0;
err = mlx4_test_async(mdev->dev);
/* When not in MSI_X or slave, test only async */
if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
return err;
/* Loop over all completion vectors of the current port;
 * for each vector, check that it works by mapping command
 * completions to it and issuing a NOP command.
*/
for (i = 0; i < priv->rx_ring_num; i++) {
err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
if (err)
break;
}
return err;
}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
if (priv->port_state.link_state == 1)
return 0;
else
return 1;
}
static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
{
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
/* The device supports 100M, 1G, 10G, 20G, 40G and 56G speed */
if (priv->port_state.link_speed != SPEED_100 &&
priv->port_state.link_speed != SPEED_1000 &&
priv->port_state.link_speed != SPEED_10000 &&
priv->port_state.link_speed != SPEED_20000 &&
priv->port_state.link_speed != SPEED_40000 &&
priv->port_state.link_speed != SPEED_56000)
return priv->port_state.link_speed;
return 0;
}
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
if (*flags & ETH_TEST_FL_OFFLINE) {
/* disable the interface */
carrier_ok = netif_carrier_ok(dev);
netif_carrier_off(dev);
/* Wait until all tx queues are empty.
* There should not be any additional incoming traffic
* since we turned the carrier off. */
msleep(200);
if (priv->mdev->dev->caps.flags &
MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU)
buf[4] = mlx4_en_test_loopback(priv);
}
if (carrier_ok)
netif_carrier_on(dev);
}
buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
for (i = 0; i < MLX4_EN_NUM_SELF_TEST; i++) {
if (buf[i])
*flags |= ETH_TEST_FL_FAILED;
}
}
| linux-master | drivers/net/ethernet/mellanox/mlx4/en_selftest.c |
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/indirect_call_wrapper.h>
#include <net/ipv6.h>
#include "mlx4_en.h"
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, u32 size,
u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring;
int tmp;
int err;
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
en_err(priv, "Failed allocating TX ring\n");
return -ENOMEM;
}
ring->size = size;
ring->size_mask = size - 1;
ring->sp_stride = stride;
ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
if (!ring->tx_info) {
err = -ENOMEM;
goto err_ring;
}
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
GFP_KERNEL, node);
if (!ring->bounce_buf) {
ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
GFP_KERNEL);
if (!ring->bounce_buf) {
err = -ENOMEM;
goto err_info;
}
}
ring->buf_size = ALIGN(size * ring->sp_stride, MLX4_EN_PAGE_SIZE);
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
ring->buf = ring->sp_wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
ring, ring->buf, ring->size, ring->buf_size,
(unsigned long long) ring->sp_wqres.buf.direct.map);
err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
MLX4_RESERVE_ETH_BF_QP,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
goto err_hwq_res;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
ring->sp_qp.event = mlx4_en_sqp_event;
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false;
ring->bf_alloced = false;
priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
} else {
ring->bf_alloced = true;
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
if (queue_index < priv->num_tx_rings_p_up)
cpumask_set_cpu(cpumask_local_spread(queue_index,
priv->mdev->dev->numa_node),
&ring->sp_affinity_mask);
*pring = ring;
return 0;
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
err_bounce:
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
err_info:
kvfree(ring->tx_info);
ring->tx_info = NULL;
err_ring:
kfree(ring);
*pring = NULL;
return err;
}
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *ring = *pring;
en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
if (ring->bf_alloced)
mlx4_bf_free(mdev->dev, &ring->bf);
mlx4_qp_remove(mdev->dev, &ring->sp_qp);
mlx4_qp_free(mdev->dev, &ring->sp_qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
mlx4_free_hwq_res(mdev->dev, &ring->sp_wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
kvfree(ring->tx_info);
ring->tx_info = NULL;
kfree(ring);
*pring = NULL;
}
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int cq, int user_prio)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
ring->sp_cqn = cq;
ring->prod = 0;
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
ring->free_tx_desc = mlx4_en_free_tx_desc;
ring->sp_qp_state = MLX4_QP_STATE_RST;
ring->doorbell_qpn = cpu_to_be32(ring->sp_qp.qpn << 8);
ring->mr_key = cpu_to_be32(mdev->mr.key);
mlx4_en_fill_qp_context(priv, ring->size, ring->sp_stride, 1, 0, ring->qpn,
ring->sp_cqn, user_prio, &ring->sp_context);
if (ring->bf_alloced)
ring->sp_context.usr_page =
cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
ring->bf.uar->index));
err = mlx4_qp_to_ready(mdev->dev, &ring->sp_wqres.mtt, &ring->sp_context,
&ring->sp_qp, &ring->sp_qp_state);
if (!cpumask_empty(&ring->sp_affinity_mask))
netif_set_xps_queue(priv->dev, &ring->sp_affinity_mask,
ring->queue_index);
return err;
}
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_qp_modify(mdev->dev, NULL, ring->sp_qp_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->sp_qp);
}
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);
return used > ring->full_size;
}
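/* Overwrite the TXBBs of a completed descriptor with a stamp value that
 * carries the ring ownership bit, flipping the bit whenever the walk wraps
 * past the end of the ring buffer.
 */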
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring, int index,
u8 owner)
{
__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
void *end = ring->buf + ring->buf_size;
__be32 *ptr = (__be32 *)tx_desc;
int i;
/* Optimize the common case when there are no wraparounds */
if (likely((void *)tx_desc +
(tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
/* Stamp the freed descriptor */
for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
}
} else {
/* Stamp the freed descriptor */
for (i = 0; i < tx_info->nr_txbb << LOG_TXBB_SIZE;
i += STAMP_STRIDE) {
*ptr = stamp;
ptr += STAMP_DWORDS;
if ((void *)ptr >= end) {
ptr = ring->buf;
stamp ^= cpu_to_be32(0x80000000);
}
}
}
}
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode));
u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
void *end = ring->buf + ring->buf_size;
struct sk_buff *skb = tx_info->skb;
int nr_maps = tx_info->nr_maps;
int i;
/* We do not touch skb here, so prefetch skb->users location
* to speedup consume_skb()
*/
prefetchw(&skb->users);
if (unlikely(timestamp)) {
struct skb_shared_hwtstamps hwts;
mlx4_en_fill_hwtstamps(priv->mdev, &hwts, timestamp);
skb_tstamp_tx(skb, &hwts);
}
if (!tx_info->inl) {
if (tx_info->linear)
dma_unmap_single(priv->ddev,
tx_info->map0_dma,
tx_info->map0_byte_count,
DMA_TO_DEVICE);
else
dma_unmap_page(priv->ddev,
tx_info->map0_dma,
tx_info->map0_byte_count,
DMA_TO_DEVICE);
/* Optimize the common case when there are no wraparounds */
if (likely((void *)tx_desc +
(tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) {
for (i = 1; i < nr_maps; i++) {
data++;
dma_unmap_page(priv->ddev,
(dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
DMA_TO_DEVICE);
}
} else {
if ((void *)data >= end)
data = ring->buf + ((void *)data - end);
for (i = 1; i < nr_maps; i++) {
data++;
/* Check for wraparound before unmapping */
if ((void *) data >= end)
data = ring->buf;
dma_unmap_page(priv->ddev,
(dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
DMA_TO_DEVICE);
}
}
}
napi_consume_skb(skb, napi_mode);
return tx_info->nr_txbb;
}
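/* XDP TX completion: instead of freeing an skb, try to return the page to
 * the RX ring's recycle cache; if that fails (or we are not running under
 * NAPI), unmap and release the page.
 */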
INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode));
u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
int index, u64 timestamp,
int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_rx_alloc frame = {
.page = tx_info->page,
.dma = tx_info->map0_dma,
};
if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
}
return tx_info->nr_txbb;
}
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int cnt = 0;
/* Skip last polled descriptor */
ring->cons += ring->last_nr_txbb;
en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
ring->cons, ring->prod);
if ((u32) (ring->prod - ring->cons) > ring->size) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Tx consumer passed producer!\n");
return 0;
}
while (ring->cons != ring->prod) {
ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
0, 0 /* Non-NAPI caller */);
ring->cons += ring->last_nr_txbb;
cnt++;
}
if (ring->tx_queue)
netdev_tx_reset_queue(ring->tx_queue);
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
return cnt;
}
static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
u16 cqe_index, struct mlx4_en_tx_ring *ring)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_info *tx_info;
struct mlx4_en_tx_desc *tx_desc;
u16 wqe_index;
int desc_size;
en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
false);
wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
tx_info = &ring->tx_info[wqe_index];
desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
wqe_index, desc_size);
tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
return;
en_err(priv, "Scheduling port restart\n");
queue_work(mdev->workqueue, &priv->restart_task);
}
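/* Poll the TX completion queue: walk CQEs while their ownership bit matches
 * the current consumer pass, free (or recycle) the completed TXBBs and stamp
 * them for reuse, then publish the new CQ and ring consumer indices, update
 * BQL accounting and wake the queue for regular (non-XDP) TX rings.
 */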
int mlx4_en_process_tx_cq(struct net_device *dev,
struct mlx4_en_cq *cq, int napi_budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring];
struct mlx4_cqe *cqe;
u16 index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
int size = cq->size;
u32 size_mask = ring->size_mask;
struct mlx4_cqe *buf = cq->buf;
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
int done = 0;
int budget = priv->tx_work_limit;
u32 last_nr_txbb;
u32 ring_cons;
if (unlikely(!priv->port_up))
return 0;
netdev_txq_bql_complete_prefetchw(ring->tx_queue);
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
last_nr_txbb = READ_ONCE(ring->last_nr_txbb);
ring_cons = READ_ONCE(ring->cons);
ring_index = ring_cons & size_mask;
stamp_index = ring_index;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cons_index & size) && (done < budget)) {
u16 new_index;
/*
* make sure we read the CQE after we read the
* ownership bit
*/
dma_rmb();
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR))
if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
ring);
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
do {
u64 timestamp = 0;
txbbs_skipped += last_nr_txbb;
ring_index = (ring_index + last_nr_txbb) & size_mask;
if (unlikely(ring->tx_info[ring_index].ts_requested))
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
mlx4_en_free_tx_desc,
mlx4_en_recycle_tx_desc,
priv, ring, ring_index,
timestamp, napi_budget);
mlx4_en_stamp_wqe(priv, ring, stamp_index,
!!((ring_cons + txbbs_stamp) &
ring->size));
stamp_index = ring_index;
txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while ((++done < budget) && (ring_index != new_index));
++cons_index;
index = cons_index & size_mask;
cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
}
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
/* we want to dirty this cache line once */
WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb);
WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped);
if (cq->type == TX_XDP)
return done;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
/* Wake up the TX queue if it was stopped and the ring is no longer full.
 */
if (netif_tx_queue_stopped(ring->tx_queue) &&
!mlx4_en_is_tx_ring_full(ring)) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
return done;
}
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (likely(priv->port_up))
napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
/* TX CQ polling - called by NAPI */
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
{
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
int work_done;
work_done = mlx4_en_process_tx_cq(dev, cq, budget);
if (work_done >= budget)
return budget;
if (napi_complete_done(napi, work_done))
mlx4_en_arm_cq(priv, cq);
return 0;
}
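/* Copy a descriptor that was built in the bounce buffer (because it would
 * have wrapped past the end of the ring) back into the ring buffer: first
 * the part that wraps around to the start of the ring, then the remainder at
 * the original index, copying backwards one dword at a time with a write
 * barrier at each TXBB boundary.
 */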
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
u32 index,
unsigned int desc_size)
{
u32 copy = (ring->size - index) << LOG_TXBB_SIZE;
int i;
for (i = desc_size - copy - 4; i >= 0; i -= 4) {
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
*((u32 *) (ring->buf + i)) =
*((u32 *) (ring->bounce_buf + copy + i));
}
for (i = copy - 4; i >= 4 ; i -= 4) {
if ((i & (TXBB_SIZE - 1)) == 0)
wmb();
*((u32 *)(ring->buf + (index << LOG_TXBB_SIZE) + i)) =
*((u32 *) (ring->bounce_buf + i));
}
/* Return real descriptor location */
return ring->buf + (index << LOG_TXBB_SIZE);
}
/* Decide if skb can be inlined in tx descriptor to avoid dma mapping
 *
 * It seems strange we do not simply use skb_copy_bits().
 * This would allow inlining all skbs iff skb->len <= inline_thold
 *
 * Note that the caller already checked the skb is not a GSO packet
 */
static bool is_inline(int inline_thold, const struct sk_buff *skb,
const struct skb_shared_info *shinfo,
void **pfrag)
{
void *ptr;
if (skb->len > inline_thold || !inline_thold)
return false;
if (shinfo->nr_frags == 1) {
ptr = skb_frag_address_safe(&shinfo->frags[0]);
if (unlikely(!ptr))
return false;
*pfrag = ptr;
return true;
}
if (shinfo->nr_frags)
return false;
return true;
}
static int inline_size(const struct sk_buff *skb)
{
if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
<= MLX4_INLINE_ALIGN)
return ALIGN(skb->len + CTRL_SIZE +
sizeof(struct mlx4_wqe_inline_seg), 16);
else
return ALIGN(skb->len + CTRL_SIZE + 2 *
sizeof(struct mlx4_wqe_inline_seg), 16);
}
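/* Compute the WQE size in bytes for this skb: for GSO packets, the ctrl
 * segment plus one data segment per fragment plus the LSO header prefix
 * rounded up to DS_SIZE (plus one more data segment if the linear part
 * carries payload); for non-GSO packets, either the inline size or the ctrl
 * segment plus one data segment per fragment and one for the linear part.
 * Returns 0 if the packet cannot be sent (non-linear LSO headers).
 */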
static int get_real_size(const struct sk_buff *skb,
const struct skb_shared_info *shinfo,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
void **pfrag,
int *hopbyhop)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
*hopbyhop = 0;
if (skb->encapsulation) {
*lso_header_size = skb_inner_tcp_all_headers(skb);
} else {
/* Detects large IPV6 TCP packets and prepares for removal of
* HBH header that has been pushed by ip6_xmit(),
* mainly so that tcpdump can dissect them.
*/
if (ipv6_has_hopopt_jumbo(skb))
*hopbyhop = sizeof(struct hop_jumbo_hdr);
*lso_header_size = skb_tcp_all_headers(skb);
}
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
if (*lso_header_size < skb_headlen(skb))
real_size += DS_SIZE;
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
return 0;
}
}
} else {
*lso_header_size = 0;
*inline_ok = is_inline(priv->prof->inline_thold, skb,
shinfo, pfrag);
if (*inline_ok)
real_size = inline_size(skb);
else
real_size = CTRL_SIZE +
(shinfo->nr_frags + 1) * DS_SIZE;
}
return real_size;
}
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
const struct sk_buff *skb,
const struct skb_shared_info *shinfo,
void *fragptr)
{
struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl);
unsigned int hlen = skb_headlen(skb);
if (skb->len <= spc) {
if (likely(skb->len >= MIN_PKT_LEN)) {
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
inl = (void *)inl->data + spc;
memcpy(inl->data, fragptr, skb->len - spc);
} else {
skb_copy_from_linear_data(skb, inl->data, spc);
inl = (void *)inl->data + spc;
skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
dma_wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
return netdev_pick_tx(dev, skb, NULL);
return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
unsigned int bytecnt)
{
__iowrite64_copy(dst, src, bytecnt / 8);
}
void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
{
wmb();
/* Since there is no iowrite*_native() that writes the value as-is
 * without byteswapping, use the variant that does not byteswap for
 * the relevant arch endianness.
 */
#if defined(__LITTLE_ENDIAN)
iowrite32(
#else
iowrite32be(
#endif
(__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
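/* Finalize and publish a TX descriptor. On the BlueFlame path the whole
 * descriptor is copied into the BlueFlame register window, which posts the
 * work without a separate doorbell; otherwise ownership is handed to HW and
 * the doorbell is rung only when send_doorbell is set (xmit_more batching).
 */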
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
struct mlx4_en_tx_desc *tx_desc,
union mlx4_wqe_qpn_vlan qpn_vlan,
int desc_size, int bf_index,
__be32 op_own, bool bf_ok,
bool send_doorbell)
{
tx_desc->ctrl.qpn_vlan = qpn_vlan;
if (bf_ok) {
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
*/
dma_wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
desc_size);
wmb();
ring->bf.offset ^= ring->bf.buf_size;
} else {
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
*/
dma_wmb();
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell)
mlx4_en_xmit_doorbell(ring);
else
ring->xmit_more++;
}
}
static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
struct skb_shared_info *shinfo,
struct mlx4_wqe_data_seg *data,
struct sk_buff *skb,
int lso_header_size,
__be32 mr_key,
struct mlx4_en_tx_info *tx_info)
{
struct device *ddev = priv->ddev;
dma_addr_t dma = 0;
u32 byte_count = 0;
int i_frag;
/* Map fragments if any */
for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
const skb_frag_t *frag = &shinfo->frags[i_frag];
byte_count = skb_frag_size(frag);
dma = skb_frag_dma_map(ddev, frag,
0, byte_count,
DMA_TO_DEVICE);
if (dma_mapping_error(ddev, dma))
goto tx_drop_unmap;
data->addr = cpu_to_be64(dma);
data->lkey = mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(byte_count);
--data;
}
/* Map linear part if needed */
if (tx_info->linear) {
byte_count = skb_headlen(skb) - lso_header_size;
dma = dma_map_single(ddev, skb->data +
lso_header_size, byte_count,
DMA_TO_DEVICE);
if (dma_mapping_error(ddev, dma))
goto tx_drop_unmap;
data->addr = cpu_to_be64(dma);
data->lkey = mr_key;
dma_wmb();
data->byte_count = cpu_to_be32(byte_count);
}
/* tx completion can avoid cache line miss for common cases */
tx_info->map0_dma = dma;
tx_info->map0_byte_count = byte_count;
return true;
tx_drop_unmap:
en_err(priv, "DMA mapping error\n");
while (++i_frag < shinfo->nr_frags) {
++data;
dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
be32_to_cpu(data->byte_count),
DMA_TO_DEVICE);
}
return false;
}
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mlx4_en_priv *priv = netdev_priv(dev);
union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
u32 __maybe_unused ring_cons;
int tx_ind;
int nr_txbb;
int desc_size;
int real_size;
u32 index, bf_index;
struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
bool bounce = false;
bool send_doorbell;
bool stop_queue;
bool inline_ok;
u8 data_offset;
int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[TX][tx_ind];
if (unlikely(!priv->port_up))
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr, &hopbyhop);
if (unlikely(!real_size))
goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size >> LOG_TXBB_SIZE;
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
u16 vlan_proto;
qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
vlan_proto = be16_to_cpu(skb->vlan_proto);
if (vlan_proto == ETH_P_8021AD)
qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
else if (vlan_proto == ETH_P_8021Q)
qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
else
qpn_vlan.ins_vlan = 0;
bf_ok = false;
}
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
/* Packet is good - grab an index and transmit it */
index = ring->prod & ring->size_mask;
bf_index = ring->prod;
/* See if we have enough space for whole descriptor TXBB for setting
* SW ownership on next descriptor; if not, use a bounce buffer. */
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
goto tx_drop_count;
}
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
bf_ok = false;
}
/* Save skb in tx_info ring */
tx_info = &ring->tx_info[index];
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
if (!lso_header_size) {
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
}
/* valid only for non-inline segments */
tx_info->data_offset = data_offset;
tx_info->inl = inline_ok;
tx_info->linear = lso_header_size < skb_headlen(skb) && !inline_ok;
tx_info->nr_maps = shinfo->nr_frags + tx_info->linear;
data += tx_info->nr_maps - 1;
if (!tx_info->inl)
if (!mlx4_en_build_dma_wqe(priv, shinfo, data, skb,
lso_header_size, ring->mr_key,
tx_info))
goto tx_drop_count;
/* For timestamping, set the in-progress flag in skb_shinfo and record
 * the request in tx_info for later reference.
 */
tx_info->ts_requested = 0;
if (unlikely(ring->hwtstamp_tx_type == HWTSTAMP_TX_ON &&
shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
shinfo->tx_flags |= SKBTX_IN_PROGRESS;
tx_info->ts_requested = 1;
}
/* Prepare the ctrl segment, apart from opcode+ownership which depends
 * on whether LSO is used */
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
if (!skb->encapsulation)
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
else
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
ring->tx_csum++;
}
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
struct ethhdr *ethh;
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
ethh = (struct ethhdr *)skb->data;
tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
}
/* Handle LSO (TSO) packets */
if (lso_header_size) {
int i;
/* Mark opcode as LSO */
op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
if (unlikely(hopbyhop)) {
/* remove the HBH header.
* Layout: [Ethernet header][IPv6 header][HBH][TCP header]
*/
memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
h6->nexthdr = IPPROTO_TCP;
/* Copy the TCP header after the IPv6 one */
memcpy(h6 + 1,
skb->data + ETH_HLEN + sizeof(*h6) +
sizeof(struct hop_jumbo_hdr),
tcp_hdrlen(skb));
/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
} else {
/* Copy headers;
* note that we already verified that it is linear
*/
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
}
ring->tso_packets++;
i = shinfo->gso_segs;
tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
ring->packets += i;
} else {
/* Normal (Non LSO) packet */
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
if (tx_info->inl)
build_inline_wqe(tx_desc, skb, shinfo, fragptr);
if (skb->encapsulation) {
union {
struct iphdr *v4;
struct ipv6hdr *v6;
unsigned char *hdr;
} ip;
u8 proto;
ip.hdr = skb_inner_network_header(skb);
proto = (ip.v4->version == 4) ? ip.v4->protocol :
ip.v6->nexthdr;
if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
else
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
}
WRITE_ONCE(ring->prod, ring->prod + nr_txbb);
/* If we used a bounce buffer then copy descriptor back into place */
if (unlikely(bounce))
tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
skb_tx_timestamp(skb);
/* Check available TXBBs and 2K spare for prefetch */
stop_queue = mlx4_en_is_tx_ring_full(ring);
if (unlikely(stop_queue)) {
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
tx_info->nr_bytes,
netdev_xmit_more());
real_size = (real_size / 16) & 0x3f;
bf_ok &= desc_size <= MAX_BF && send_doorbell;
if (bf_ok)
qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
else
qpn_vlan.fence_size = real_size;
mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index,
op_own, bf_ok, send_doorbell);
if (unlikely(stop_queue)) {
/* If the queue was emptied after the stop_queue check above and before
 * the netif_tx_stop_queue() call, we need to wake the queue here or it
 * will remain stopped forever.
 * A memory barrier is needed to make sure ring->cons was not updated
 * before the queue was stopped.
 */
smp_rmb();
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
}
}
return NETDEV_TX_OK;
tx_drop_count:
ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
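/* XDP TX descriptors have a fixed layout: a single TXBB holding one ctrl
 * segment and one data segment, so the fence_size (in 16-byte units) can be
 * computed at compile time.
 */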
#define MLX4_EN_XDP_TX_NRTXBB 1
#define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \
/ 16) & 0x3f)
void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring)
{
int i;
for (i = 0; i < ring->size; i++) {
struct mlx4_en_tx_info *tx_info = &ring->tx_info[i];
struct mlx4_en_tx_desc *tx_desc = ring->buf +
(i << LOG_TXBB_SIZE);
tx_info->map0_byte_count = PAGE_SIZE;
tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB;
tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data);
tx_info->ts_requested = 0;
tx_info->nr_maps = 1;
tx_info->linear = 1;
tx_info->inl = 0;
tx_desc->data.lkey = ring->mr_key;
tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ;
tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
}
}
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
struct mlx4_en_priv *priv, unsigned int length,
int tx_ind, bool *doorbell_pending)
{
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_en_tx_info *tx_info;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_ring *ring;
dma_addr_t dma;
__be32 op_own;
int index;
if (unlikely(!priv->port_up))
goto tx_drop;
ring = priv->tx_ring[TX_XDP][tx_ind];
if (unlikely(mlx4_en_is_tx_ring_full(ring)))
goto tx_drop_count;
index = ring->prod & ring->size_mask;
tx_info = &ring->tx_info[index];
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
dma = frame->dma;
tx_info->page = frame->page;
frame->page = NULL;
tx_info->map0_dma = dma;
tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset,
length, DMA_TO_DEVICE);
data->addr = cpu_to_be64(dma + frame->page_offset);
dma_wmb();
data->byte_count = cpu_to_be32(length);
/* tx completion can avoid cache line miss for common cases */
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
rx_ring->xdp_tx++;
WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
*/
dma_wmb();
tx_desc->ctrl.owner_opcode = op_own;
ring->xmit_more++;
*doorbell_pending = true;
return NETDEV_TX_OK;
tx_drop_count:
rx_ring->xdp_tx_full++;
*doorbell_pending = true;
tx_drop:
return NETDEV_TX_BUSY;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_tx.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <uapi/rdma/mlx4-abi.h>
#include "fw.h"
#include "icm.h"
enum {
MLX4_COMMAND_INTERFACE_MIN_REV = 2,
MLX4_COMMAND_INTERFACE_MAX_REV = 3,
MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS = 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
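/* MLX4_GET/MLX4_PUT read and write big-endian fields at a byte offset inside
 * a command mailbox, dispatching on the size of the destination/source
 * variable (1, 2, 4 or 8 bytes). An unsupported size produces a link-time
 * error through the __buggy_use_of_* dummy declarations.
 */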
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
__be64 val; \
switch (sizeof(dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
case 8: val = get_unaligned((__be64 *)__p); \
(dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
} while (0)
#define MLX4_PUT(dest, source, offset) \
do { \
void *__d = ((char *) (dest) + (offset)); \
switch (sizeof(source)) { \
case 1: *(u8 *) __d = (source); break; \
case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
default: __buggy_use_of_MLX4_PUT(); \
} \
} while (0)
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
static const char *fname[] = {
[ 0] = "RC transport",
[ 1] = "UC transport",
[ 2] = "UD transport",
[ 3] = "XRC transport",
[ 6] = "SRQ support",
[ 7] = "IPoIB checksum offload",
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
[12] = "Dual Port Different Protocol (DPDP) support",
[15] = "Big LSO headers",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
[19] = "Raw multicast support",
[20] = "Address vector port checking support",
[21] = "UD multicast support",
[30] = "IBoE support",
[32] = "Unicast loopback support",
[34] = "FCS header control",
[37] = "Wake On LAN (port1) support",
[38] = "Wake On LAN (port2) support",
[40] = "UDP RSS support",
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[48] = "Counters support",
[52] = "RSS IP fragments support",
[53] = "Port ETS Scheduler support",
[55] = "Port link type sensing support",
[59] = "Port management change event support",
[61] = "64 byte EQE support",
[62] = "64 byte CQE support",
};
int i;
mlx4_dbg(dev, "DEV_CAP flags:\n");
for (i = 0; i < ARRAY_SIZE(fname); ++i)
if (fname[i] && (flags & (1LL << i)))
mlx4_dbg(dev, " %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
static const char * const fname[] = {
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
[3] = "Device managed flow steering support",
[4] = "Automatic MAC reassignment support",
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support",
[9] = "Device managed flow steering IPoIB support",
[10] = "TCP/IP offloads/flow-steering for VXLAN support",
[11] = "MAD DEMUX (Secure-Host) support",
[12] = "Large cache line (>64B) CQE stride support",
[13] = "Large cache line (>64B) EQE stride support",
[14] = "Ethernet protocol control support",
[15] = "Ethernet Backplane autoneg support",
[16] = "CONFIG DEV support",
[17] = "Asymmetric EQs support",
[18] = "More than 80 VFs support",
[19] = "Performance optimized for limited rule configuration flow steering support",
[20] = "Recoverable error events support",
[21] = "Port Remap support",
[22] = "QCN support",
[23] = "QP rate limiting support",
[24] = "Ethernet Flow control statistics support",
[25] = "Granular QoS per VF support",
[26] = "Port ETS Scheduler support",
[27] = "Port beacon support",
[28] = "RX-ALL support",
[29] = "802.1ad offload support",
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
[34] = "DMFS Sniffer support (UC & MC)",
[35] = "Diag counters per port",
[36] = "QinQ VST mode support",
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
[39] = "Report driver version to FW support",
[40] = "SW CQ initialization support",
};
int i;
for (i = 0; i < ARRAY_SIZE(fname); ++i)
if (fname[i] && (flags & (1LL << i)))
mlx4_dbg(dev, " %s\n", fname[i]);
}
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *inbox;
int err = 0;
#define MOD_STAT_CFG_IN_SIZE 0x100
#define MOD_STAT_CFG_PG_SZ_M_OFFSET 0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET 0x003
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 in_modifier;
u8 field;
u16 field16;
int err;
#define QUERY_FUNC_BUS_OFFSET 0x00
#define QUERY_FUNC_DEVICE_OFFSET 0x01
#define QUERY_FUNC_FUNCTION_OFFSET 0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET 0x04
#define QUERY_FUNC_MAX_EQ_OFFSET 0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
in_modifier = slave;
err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
MLX4_CMD_QUERY_FUNC,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
func->bus = field & 0xf;
MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
func->device = field & 0xf1;
MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
func->function = field & 0x7;
MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
func->physical_function = field & 0xf;
MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
func->rsvd_eqs = field16 & 0xffff;
MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
func->max_eq = field16 & 0xffff;
MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
func->rsvd_uars = field & 0x0f;
mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
func->bus, func->device, func->function, func->physical_function,
func->max_eq, func->rsvd_eqs, func->rsvd_uars);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_vport_state *vp_admin;
int err;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan,
&vp_oper->vlan_idx);
if (err) {
vp_oper->vlan_idx = NO_INDX;
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
}
mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
return 0;
}
static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state;
struct mlx4_vport_state *vp_admin;
int err;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
slave_state = &priv->mfunc.master.slave_state[slave];
if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
(!slave_state->active))
return 0;
if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos)
return 0;
if (!slave_state->vst_qinq_supported) {
/* Warn and revert the request to set vst QinQ mode */
vp_admin->vlan_proto = vp_oper->state.vlan_proto;
vp_admin->default_vlan = vp_oper->state.default_vlan;
vp_admin->default_qos = vp_oper->state.default_qos;
mlx4_warn(&priv->dev,
"Slave %d does not support VST QinQ mode\n", slave);
return 0;
}
err = mlx4_activate_vst_qinq(priv, slave, port);
return err;
}
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u8 field, port;
u32 size, proxy_qp, qkey;
int err = 0;
struct mlx4_func func;
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68
#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET 0x6c
#define QUERY_FUNC_CAP_FMR_FLAG 0x80
#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
#define QUERY_FUNC_CAP_FLAG_ETH 0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04
#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG (1UL << 30)
/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET 0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET 0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET 0xc
#define QUERY_FUNC_CAP_QP0_TUNNEL 0x10
#define QUERY_FUNC_CAP_QP0_PROXY 0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL 0x18
#define QUERY_FUNC_CAP_QP1_PROXY 0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID 0x28
#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC 0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN 0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO 0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
#define QUERY_FUNC_CAP_PHV_BIT 0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20
#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
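/* op_modifier 1: per-port query, in_modifier carries the (slave) port
 * number. op_modifier 0: general per-function capabilities, in_modifier
 * carries the slave capability flags defined above.
 */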
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
int converted_port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier);
struct mlx4_vport_oper_state *vp_oper;
if (converted_port < 0)
return -EINVAL;
vhcr->in_modifier = converted_port;
/* phys-port = logical-port */
field = vhcr->in_modifier -
find_first_bit(actv_ports.ports, dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
port = vhcr->in_modifier;
proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;
/* Set the nic_info bit to indicate support for the new fields */
field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
if (mlx4_vf_smi_enabled(dev, slave, port) &&
!mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
MLX4_PUT(outbox->buf, qkey,
QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
}
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
/* size is now the QP number */
size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
size += 2;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);
MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
proxy_qp += 2;
MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
err = mlx4_handle_vst_qinq(priv, slave, port);
if (err)
return err;
field = 0;
if (dev->caps.phv_bit[port])
field |= QUERY_FUNC_CAP_PHV_BIT;
if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
/* enable rdma and ethernet interfaces, new quota locations,
* and reserved lkey
*/
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
QUERY_FUNC_CAP_FLAG_RESD_LKEY);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
(unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
field = 0; /* protected FMR support not available as yet */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
size = dev->caps.num_srqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
size = dev->caps.num_cqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
mlx4_QUERY_FUNC(dev, &func, slave)) {
size = vhcr->in_modifier &
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
dev->caps.num_eqs :
rounddown_pow_of_two(dev->caps.num_eqs);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
size = dev->caps.reserved_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
} else {
size = vhcr->in_modifier &
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
func.max_eq :
rounddown_pow_of_two(func.max_eq);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
size = func.rsvd_eqs;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
}
size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
size = dev->caps.num_mpts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
size = dev->caps.num_mtts;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
size = dev->caps.num_mgms + dev->caps.num_amgms;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
slave_state->vst_qinq_supported = true;
} else
err = -EINVAL;
return err;
}
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
struct mlx4_func_cap *func_cap)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field, op_modifier;
u32 size, qkey;
int err = 0, quotas = 0;
u32 in_modifier;
u32 slave_caps;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
in_modifier = op_modifier ? gen_or_port : slave_caps;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
MLX4_CMD_QUERY_FUNC_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
goto out;
outbox = mailbox->buf;
if (!op_modifier) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
err = -EPROTONOSUPPORT;
goto out;
}
func_cap->flags = field;
quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
func_cap->pf_context_behaviour = size;
if (quotas) {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
func_cap->mcg_quota = size & 0xFFFFFF;
} else {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
func_cap->qp_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
func_cap->srq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
func_cap->cq_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
func_cap->mpt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
func_cap->mtt_quota = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
func_cap->mcg_quota = size & 0xFFFFFF;
}
MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
func_cap->max_eq = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
func_cap->reserved_eq = size & 0xFFFFFF;
if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
func_cap->reserved_lkey = size;
} else {
func_cap->reserved_lkey = 0;
}
func_cap->extra_flags = 0;
/* Mailbox data from 0x6c and onward should only be treated if
* QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
*/
if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
}
goto out;
}
/* logical port query */
if (gen_or_port > dev->caps.num_ports) {
err = -EINVAL;
goto out;
}
MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
mlx4_err(dev, "VLAN is enforced on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
mlx4_err(dev, "Force mac is enabled on this port\n");
err = -EPROTONOSUPPORT;
goto out;
}
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
}
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
func_cap->physical_port = field;
if (func_cap->physical_port != gen_or_port) {
err = -EINVAL;
goto out;
}
if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
func_cap->spec_qps.qp0_qkey = qkey;
} else {
func_cap->spec_qps.qp0_qkey = 0;
}
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF;
MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF;
if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
* - num remains the maximum resource index
* - 'num - reserved' is the total available objects of a resource, but
* resource indices may be less than 'reserved'
* TODO: set per-resource quotas */
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
static void disable_unsupported_roce_caps(void *buf);
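/* Issue the QUERY_DEV_CAP firmware command and unpack the returned mailbox,
 * field by field, into struct mlx4_dev_cap using the offsets defined below.
 */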
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
u32 field32, flags, ext_flags;
u16 size;
u16 stat_rate;
int err;
int i;
#define QUERY_DEV_CAP_OUT_SIZE 0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET 0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET 0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET 0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET 0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET 0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET 0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET 0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET 0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET 0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET 0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET 0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET 0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET 0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET 0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET 0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET 0x2d
#define QUERY_DEV_CAP_RSS_OFFSET 0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET 0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET 0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET 0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET 0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET 0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET 0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET 0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_WOL_OFFSET 0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET 0x4b
#define QUERY_DEV_CAP_BF_OFFSET 0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET 0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET 0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET 0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET 0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
#define QUERY_DEV_CAP_USER_MAC_EN_OFFSET 0x5C
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET 0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET 0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET 0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET 0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET 0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET 0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET 0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET 0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET 0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET 0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET 0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET 0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET 0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET 0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET 0xac
#define QUERY_DEV_CAP_MAP_CLOCK_TO_USER 0xc1
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET 0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET 0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET 0xd2
#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET 0xe4
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
if (mlx4_is_mfunc(dev))
disable_unsupported_roce_caps(outbox);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAP_CLOCK_TO_USER);
dev_cap->map_clock_to_user = field & 0x80;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
dev_cap->reserved_qps = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
dev_cap->max_qps = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
dev_cap->reserved_srqs = 1 << (field >> 4);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
dev_cap->max_srqs = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
dev_cap->max_cq_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
dev_cap->reserved_cqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
dev_cap->max_cqs = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
dev_cap->max_mpts = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
dev_cap->reserved_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
dev_cap->max_eqs = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
dev_cap->reserved_mtts = 1 << (field >> 4);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
dev_cap->reserved_mrws = 1 << (field & 0xf);
MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
dev_cap->num_sys_eqs = size & 0xfff;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
field &= 0x1f;
if (!field)
dev_cap->max_gso_sz = 0;
else
dev_cap->max_gso_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
if (field & 0x20)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
if (field & 0x10)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
field &= 0xf;
if (field) {
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
dev_cap->max_rss_tbl_sz = 1 << field;
} else
dev_cap->max_rss_tbl_sz = 0;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
dev_cap->max_rdma_global = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
dev_cap->local_ca_ack_delay = field & 0x1f;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
if (field & 0x10)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
if (field & 0x20)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
if (field & (1 << 5))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
if (field & 0x1)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
dev_cap->flags = flags | (u64)ext_flags << 32;
MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
dev_cap->wol_port[1] = !!(field & 0x20);
dev_cap->wol_port[2] = !!(field & 0x40);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
dev_cap->min_page_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
if (field & 0x80) {
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
dev_cap->bf_reg_size = 1 << (field & 0x1f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
field = 3;
dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
} else {
dev_cap->bf_reg_size = 0;
}
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
dev_cap->max_sq_sg = field;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET);
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
if (field & 0x1)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
dev_cap->reserved_mgms = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
dev_cap->max_mcgs = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
dev_cap->reserved_pds = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
dev_cap->max_pds = 1 << (field & 0x3f);
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
dev_cap->reserved_xrcds = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
dev_cap->max_xrcds = 1 << (field & 0x1f);
MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
dev_cap->rdmarc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
dev_cap->qpc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
dev_cap->aux_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
dev_cap->altc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
dev_cap->eqc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
dev_cap->cqc_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
dev_cap->srq_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
dev_cap->cmpt_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
dev_cap->mtt_entry_sz = size;
MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
dev_cap->dmpt_entry_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
dev_cap->max_srq_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
dev_cap->max_qp_sz = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
dev_cap->resize_srq = field & 1;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
dev_cap->max_rq_sg = field;
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
dev_cap->max_rq_desc_sz = size;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
if (field & (1 << 4))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
if (field & (1 << 5))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
if (field & (1 << 6))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
if (field & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
MLX4_GET(dev_cap->bmme_flags, outbox,
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
if (field & 0x20)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
if (field & (1 << 2))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
if (field & 0x80)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
if (field & 0x40)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
if (field32 & (1 << 8))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
if (field32 & (1 << 17))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
if (field & (1 << 5))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
MLX4_GET(dev_cap->max_counters, outbox,
QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
MLX4_GET(field32, outbox,
QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
if (field32 & (1 << 0))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;
MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;
MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
dev_cap->rl_caps.num_rates = size;
if (dev_cap->rl_caps.num_rates) {
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
dev_cap->rl_caps.max_val = size & 0xfff;
dev_cap->rl_caps.max_unit = size >> 14;
MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
dev_cap->rl_caps.min_val = size & 0xfff;
dev_cap->rl_caps.min_unit = size >> 14;
}
MLX4_GET(dev_cap->health_buffer_addrs, outbox,
QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);
MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
if (field32 & (1 << 16))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
if (field32 & (1 << 18))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
if (field32 & (1 << 19))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
if (field32 & (1 << 26))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
if (field32 & (1 << 20))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
if (field32 & (1 << 21))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
if (field32 & (1 << 23))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
for (i = 1; i <= dev_cap->num_ports; i++) {
err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
if (err)
goto out;
}
/*
* Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
* we can't use any EQs whose doorbell falls on that page,
* even if the EQ itself isn't reserved.
*/
if (dev_cap->num_sys_eqs == 0)
dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
dev_cap->reserved_eqs);
else
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
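/* Dump the device capabilities gathered by QUERY_DEV_CAP to the kernel
 * debug log. Purely informational; no device state is modified.
 */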
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
if (dev_cap->bf_reg_size > 0)
mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
else
mlx4_dbg(dev, "BlueFlame not available\n");
mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
dev_cap->bmme_flags, dev_cap->reserved_lkey);
mlx4_dbg(dev, "Max ICM size %lld MB\n",
(unsigned long long) dev_cap->max_icm_sz >> 20);
mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
dev_cap->eqc_entry_sz);
mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
dev_cap->reserved_mrws, dev_cap->reserved_mtts);
mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
dev_cap->max_pds, dev_cap->reserved_mgms);
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
dev_cap->port_cap[1].max_port_width);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
dev_cap->dmfs_high_rate_qpn_base);
mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
dev_cap->dmfs_high_rate_qpn_range);
if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;
mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
rl_caps->min_unit, rl_caps->min_val);
}
dump_dev_cap_flags(dev, dev_cap->flags);
dump_dev_cap_flags2(dev, dev_cap->flags2);
}
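/* Query the capabilities of a single port. With old firmware
 * (MLX4_FLAG_OLD_PORT_CMDS) the information is pulled out of
 * QUERY_DEV_CAP; otherwise the dedicated QUERY_PORT command is used.
 */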
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
u32 field32;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
port_cap->max_vl = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
port_cap->ib_mtu = field >> 4;
port_cap->max_port_width = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
port_cap->max_gids = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
port_cap->max_pkeys = 1 << (field & 0xf);
} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
#define QUERY_PORT_MTU_OFFSET 0x01
#define QUERY_PORT_ETH_MTU_OFFSET 0x02
#define QUERY_PORT_WIDTH_OFFSET 0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
#define QUERY_PORT_MAC_OFFSET 0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET 0x18
#define QUERY_PORT_WAVELENGTH_OFFSET 0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET 0x20
err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
port_cap->link_state = (field & 0x80) >> 7;
port_cap->supported_port_types = field & 3;
port_cap->suggested_type = (field >> 3) & 1;
port_cap->default_sense = (field >> 4) & 1;
port_cap->dmfs_optimized_state = (field >> 5) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
port_cap->ib_mtu = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
port_cap->max_port_width = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
port_cap->max_gids = 1 << (field >> 4);
port_cap->max_pkeys = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
port_cap->max_vl = field & 0xf;
port_cap->max_tc_eth = field >> 4;
MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
port_cap->log_max_macs = field & 0xf;
port_cap->log_max_vlans = field >> 4;
MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
port_cap->trans_type = field32 >> 24;
port_cap->vendor_oui = field32 & 0xffffff;
MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS (1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL (1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS (1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM (1 << 20)
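/* QUERY_DEV_CAP wrapper executed on behalf of a slave (VF): run the real
 * command natively, then edit the outbox so the guest only sees the
 * capabilities it is allowed to use (no timestamping, BlueFlame, VXLAN
 * offloads, flow steering control, QoS or rate limiting, etc.).
 */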
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
u64 flags;
int err = 0;
u8 field;
u16 field16;
u32 bmme_flags, field32;
int real_port;
int slave_port;
int first_port;
struct mlx4_active_ports actv_ports;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
disable_unsupported_roce_caps(outbox->buf);
	/* add the port management change event capability and disable
	 * memory window type 1 unconditionally for slaves
	 */
MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
actv_ports = mlx4_get_active_ports(dev, slave);
first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
for (slave_port = 0, real_port = first_port;
real_port < first_port +
bitmap_weight(actv_ports.ports, dev->caps.num_ports);
++real_port, ++slave_port) {
if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
else
flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
}
for (; slave_port < dev->caps.num_ports; ++slave_port)
flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
/* Not exposing RSS IP fragments to guests */
flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
field &= ~0x0F;
field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
/* For guests, disable timestamp */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
/* For guests, disable vxlan tunneling and QoS support */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
field &= 0xd7;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
/* For guests, disable port BEACON */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
/* For guests, report Blueflame disabled */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* For guests, disable mw type 2 and port remap*/
MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
/* turn off device-managed steering capability if not enabled */
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(field, outbox->buf,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
field &= 0x7f;
MLX4_PUT(outbox->buf, field,
QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
}
/* turn off ipoib managed steering for guests */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
field &= ~0x80;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
/* turn off host side virt features (VST, FSM, etc) for guests */
MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
/* turn off QCN for guests */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
field &= 0xfe;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
/* turn off QP max-rate limiting for guests */
field16 = 0;
MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
/* turn off QoS per VF support for guests */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
field &= 0xef;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
/* turn off ignore FCS feature for guests */
MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
field &= 0xfb;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
return 0;
}
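/* Clear the RoCE-related capability bits (RoCE v1/v2 in the BMME flags
 * plus the matching extended-flag bits) in a QUERY_DEV_CAP outbox so
 * that unsupported RoCE modes are not advertised.
 */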
static void disable_unsupported_roce_caps(void *buf)
{
u32 flags;
MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
flags &= ~(1UL << 31);
MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
flags &= ~(1UL << 24);
MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
flags &= ~(MLX4_FLAG_ROCE_V1_V2);
MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}
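/* QUERY_PORT wrapper for slaves: run the real command, then override the
 * MAC address, link state and current GID/P_Key table sizes with the
 * values assigned to the requesting VF.
 */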
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 def_mac;
u8 port_type;
u16 short_field;
int err;
int admin_link_state;
int port = mlx4_slave_convert_port(dev, slave,
vhcr->in_modifier & 0xFF);
#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
#define MLX4_PORT_LINK_UP_MASK 0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
if (port < 0)
return -EINVAL;
/* Protect against untrusted guests: enforce that this is the
* QUERY_PORT general query.
*/
if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
return -EINVAL;
vhcr->in_modifier = port;
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
MLX4_GET(port_type, outbox->buf,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
/* No link sensing allowed */
port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
/* set port type to currently operating port type */
port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
port_type |= MLX4_PORT_LINK_UP_MASK;
else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
port_type &= ~MLX4_PORT_LINK_UP_MASK;
else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
int other_port = (port == 1) ? 2 : 1;
struct mlx4_port_cap port_cap;
err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
if (err)
goto out;
port_type |= (port_cap.link_state << 7);
}
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
short_field = mlx4_get_slave_num_gids(dev, slave, port);
else
short_field = 1; /* slave max gids */
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_GID_OFFSET);
short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
MLX4_PUT(outbox->buf, short_field,
QUERY_PORT_CUR_MAX_PKEY_OFFSET);
}
out:
return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
int *gid_tbl_len, int *pkey_tbl_len)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u16 field;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
goto out;
outbox = mailbox->buf;
MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
*gid_tbl_len = field;
MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
*pkey_tbl_len = field;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
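/* Hand a list of ICM chunks to firmware using the given MAP_* command
 * (MAP_FA, MAP_ICM or MAP_ICM_AUX). Each chunk is split into naturally
 * aligned power-of-two pages, and the page entries are batched into
 * mailboxes of at most MLX4_MAILBOX_SIZE / 16 entries per command.
 */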
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_icm_iter iter;
__be64 *pages;
int lg;
int nent = 0;
int i;
int err = 0;
int ts = 0, tc = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
pages = mailbox->buf;
for (mlx4_icm_first(icm, &iter);
!mlx4_icm_last(&iter);
mlx4_icm_next(&iter)) {
/*
* We have to pass pages that are aligned to their
* size, so find the least significant 1 in the
* address or size and use that as our log2 size.
*/
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
MLX4_ICM_PAGE_SIZE,
(unsigned long long) mlx4_icm_addr(&iter),
mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
virt += 1ULL << lg;
}
pages[nent * 2 + 1] =
cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
(lg - MLX4_ICM_PAGE_SHIFT));
ts += 1 << (lg - 10);
++tc;
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
}
}
}
if (nent)
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
switch (op) {
case MLX4_CMD_MAP_FA:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
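/* Query the firmware version, command interface revision and the
 * locations of the catastrophic error buffer, clear-interrupt register,
 * communication channel and internal clock. For non-slave functions the
 * command interface revision is validated and -ENODEV is returned if it
 * is not supported by this driver.
 */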
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
struct mlx4_fw *fw = &mlx4_priv(dev)->fw;
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
int err = 0;
u64 fw_ver;
u16 cmd_if_rev;
u8 lg;
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
#define QUERY_FW_ERR_SIZE_OFFSET 0x38
#define QUERY_FW_ERR_BAR_OFFSET 0x3c
#define QUERY_FW_SIZE_OFFSET 0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
#define QUERY_FW_COMM_BASE_OFFSET 0x40
#define QUERY_FW_COMM_BAR_OFFSET 0x48
#define QUERY_FW_CLOCK_OFFSET 0x50
#define QUERY_FW_CLOCK_BAR 0x58
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
/*
* FW subminor version is at more significant bits than minor
* version, so swap here.
*/
dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
dev->caps.function = lg;
if (mlx4_is_slave(dev))
goto out;
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
}
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
cmd->max_cmds = 1 << lg;
mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff,
cmd_if_rev, cmd->max_cmds);
MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
fw->catas_bar = (fw->catas_bar >> 6) * 2;
mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
(unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
fw->comm_bar = (fw->comm_bar >> 6) * 2;
mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
fw->clock_bar = (fw->clock_bar >> 6) * 2;
mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
fw->clock_bar, fw->clock_offset);
/*
* Round up number of system pages needed in case
* MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
*/
fw->fw_pages =
ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
(unsigned long long) fw->clr_int_base, fw->clr_int_bar);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
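/* QUERY_FW wrapper for slaves: only the firmware version is passed
 * through; the PPF ID is set to MLX4_INVALID_SLAVE_ID and the rest of
 * the output is zeroed.
 */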
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
u8 *outbuf;
int err;
outbuf = outbox->buf;
err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
/* for slaves, set pci PPF ID to invalid and zero out everything
* else except FW version */
outbuf[0] = outbuf[1] = 0;
memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
return 0;
}
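/* Extract the board ID string from the VSD section of the QUERY_ADAPTER
 * output, handling both the Topspin layout and the default layout in
 * which firmware byte-swaps each 4-byte word.
 */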
static void get_board_id(void *vsd, char *board_id)
{
int i;
#define VSD_OFFSET_SIG1 0x00
#define VSD_OFFSET_SIG2 0xde
#define VSD_OFFSET_MLX_BOARD_ID 0xd0
#define VSD_OFFSET_TS_BOARD_ID 0x20
#define VSD_SIGNATURE_TOPSPIN 0x5ad
memset(board_id, 0, MLX4_BOARD_ID_LEN);
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
* swaps each 4-byte word before passing it back to
* us. Therefore we need to swab it before printing.
*/
u32 *bid_u32 = (u32 *)board_id;
for (i = 0; i < 4; ++i) {
u32 *addr;
u32 val;
addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
val = get_unaligned(addr);
val = swab32(val);
put_unaligned(val, &bid_u32[i]);
}
}
}
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
adapter->board_id);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
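/* Build the INIT_HCA mailbox from the parameters chosen by the driver
 * (ICM layout, EQE/CQE sizes, steering mode, TPT and UAR configuration)
 * and execute the command to initialize the HCA.
 */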
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *inbox;
int err;
static const u8 a0_dmfs_hw_steering[] = {
[MLX4_STEERING_DMFS_A0_DEFAULT] = 0,
[MLX4_STEERING_DMFS_A0_DYNAMIC] = 1,
[MLX4_STEERING_DMFS_A0_STATIC] = 2,
[MLX4_STEERING_DMFS_A0_DISABLE] = 3
};
#define INIT_HCA_IN_SIZE 0x200
#define INIT_HCA_VERSION_OFFSET 0x000
#define INIT_HCA_VERSION 2
#define INIT_HCA_VXLAN_OFFSET 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
#define INIT_HCA_FLAGS_OFFSET 0x014
#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET 0x020
#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
#define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET 0x0c0
#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140
#define INIT_HCA_DRIVER_VERSION_SZ 0x40
#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
/* Check port for UD address vector: */
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
/* Enable IPoIB checksumming if we can: */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
/* Enable QoS support if module parameter set */
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
/* enable counters */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
/* Enable RSS spread to fragmented IP packets when supported */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
dev->caps.eqe_size = 64;
dev->caps.eqe_factor = 1;
} else {
dev->caps.eqe_size = 32;
dev->caps.eqe_factor = 0;
}
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
dev->caps.cqe_size = 64;
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
} else {
dev->caps.cqe_size = 32;
}
	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
dev->caps.eqe_size = cache_line_size();
dev->caps.cqe_size = cache_line_size();
dev->caps.eqe_factor = 0;
MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
(ilog2(dev->caps.eqe_size) - 5)),
INIT_HCA_EQE_CQE_STRIDE_OFFSET);
		/* Userspace still needs to know to support CQE > 32B */
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
}
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
}
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
/* steering attributes */
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
cpu_to_be32(1 <<
INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz,
INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz,
INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
/* Enable Ethernet flow steering
* with udp unicast and tcp unicast
*/
if (dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_STATIC)
MLX4_PUT(inbox,
(u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_ETH_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
/* Enable IPoIB flow steering
* with udp unicast and tcp unicast
*/
MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_IB_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
if (dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
MLX4_PUT(inbox,
((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
<< 6)),
INIT_HCA_FS_A0_OFFSET);
} else {
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz,
INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_hash_sz,
INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz,
INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
MLX4_PUT(inbox, (u8) (1 << 3),
INIT_HCA_UC_STEERING_OFFSET);
}
/* TPT attributes */
MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
/* set parser VXLAN attributes */
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
u8 parser_params = 0;
MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
}
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
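/* Read back the configuration programmed by INIT_HCA (ICM base
 * addresses, steering mode, EQE/CQE settings, TPT and UAR parameters)
 * from the firmware.
 */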
int mlx4_QUERY_HCA(struct mlx4_dev *dev,
struct mlx4_init_hca_param *param)
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
u64 qword_field;
u32 dword_field;
u16 word_field;
u8 byte_field;
int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
[2] = MLX4_STEERING_DMFS_A0_STATIC,
[3] = MLX4_STEERING_DMFS_A0_DISABLE
};
#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
MLX4_CMD_QUERY_HCA,
MLX4_CMD_TIME_CLASS_B,
!mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
param->qpc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
param->log_num_qps = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
param->srqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
param->log_num_srqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
param->cqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
param->log_num_cqs = byte_field & 0x1f;
MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
param->altc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
param->auxc_base = qword_field;
MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
param->eqc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
param->log_num_eqs = byte_field & 0x1f;
MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
param->num_sys_eqs = word_field & 0xfff;
MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
param->rdmarc_base = qword_field & ~((u64)0x1f);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
param->log_rd_per_qp = byte_field & 0x7;
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
} else {
MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
if (byte_field & 0x8)
param->steering_mode = MLX4_STEERING_MODE_B0;
else
param->steering_mode = MLX4_STEERING_MODE_A0;
}
if (dword_field & (1 << 13))
param->rss_ip_frags = 1;
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
param->log_mc_entry_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
param->log_mc_hash_sz = byte_field & 0x1f;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
param->log_mc_table_sz = byte_field & 0x1f;
}
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-byte EQEs enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-byte CQEs enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
	/* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
if (byte_field) {
param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
param->cqe_size = 1 << ((byte_field &
MLX4_CQE_SIZE_MASK_STRIDE) + 5);
param->eqe_size = 1 << (((byte_field &
MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
}
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
param->mw_enabled = byte_field >> 7;
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
param->log_uar_sz = byte_field & 0xf;
/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
if (byte_field & 0x2)
param->phv_check_en = 1;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
return PTR_ERR(mailbox);
}
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
MLX4_CMD_QUERY_HCA,
MLX4_CMD_TIME_CLASS_B,
!mlx4_is_slave(dev));
if (err) {
mlx4_warn(dev, "hca_core_clock update failed\n");
goto out;
}
MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
* and real QP0 are active, so that the paravirtualized QP0 is ready
* to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
/* irrelevant if not infiniband */
if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
priv->mfunc.master.qp0_state[port].qp0_active)
return 1;
return 0;
}
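/* INIT_PORT wrapper for slaves. For Ethernet ports the physical port is
 * only initialized on the first reference; later callers just have the
 * port marked in their init_port_mask. For IB ports INIT_PORT is issued
 * by the master function once both the proxy and real QP0 are active.
 */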
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
if (port < 0)
return -EINVAL;
if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
return 0;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
/* Enable port only if it was previously disabled */
if (!priv->mfunc.master.init_port_ref[port]) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
}
priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
} else {
if (slave == mlx4_master_func_num(dev)) {
if (check_qp0_state(dev, slave, port) &&
!priv->mfunc.master.qp0_state[port].port_active) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
priv->mfunc.master.qp0_state[port].port_active = 1;
priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
}
} else
priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
}
++priv->mfunc.master.init_port_ref[port];
return 0;
}
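/* Bring up a physical port. Old firmware takes the port parameters (VL
 * cap, port width, MTU, GID and P_Key table sizes) in a mailbox; newer
 * firmware needs no input. On success the cached HCA core clock is
 * refreshed.
 */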
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *inbox;
int err;
u32 flags;
u16 field;
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE 256
#define INIT_PORT_FLAGS_OFFSET 0x00
#define INIT_PORT_FLAG_SIG (1 << 18)
#define INIT_PORT_FLAG_NG (1 << 17)
#define INIT_PORT_FLAG_G0 (1 << 16)
#define INIT_PORT_VL_SHIFT 4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET 0x04
#define INIT_PORT_MAX_GID_OFFSET 0x06
#define INIT_PORT_MAX_PKEY_OFFSET 0x0a
#define INIT_PORT_GUID0_OFFSET 0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET 0x20
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
flags = 0;
flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
field = 128 << dev->caps.ib_mtu_cap[port];
MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
field = dev->caps.gid_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
field = dev->caps.pkey_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
mlx4_hca_core_clock_update(dev);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
int err;
if (port < 0)
return -EINVAL;
if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
(1 << port)))
return 0;
if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
if (priv->mfunc.master.init_port_ref[port] == 1) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
}
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
} else {
/* infiniband port */
if (slave == mlx4_master_func_num(dev)) {
if (!priv->mfunc.master.qp0_state[port].qp0_active &&
priv->mfunc.master.qp0_state[port].port_active) {
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
return err;
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
priv->mfunc.master.qp0_state[port].port_active = 0;
}
} else
priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
}
--priv->mfunc.master.init_port_ref[port];
return 0;
}
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
struct mlx4_config_dev {
__be32 update_flags;
__be32 rsvd1[3];
__be16 vxlan_udp_dport;
__be16 rsvd2;
__be16 roce_v2_entropy;
__be16 roce_v2_udp_dport;
__be32 roce_flags;
__be32 rsvd4[25];
__be16 rsvd5;
u8 rsvd6;
u8 rx_checksum_val;
};
#define MLX4_VXLAN_UDP_DPORT (1 << 0)
#define MLX4_ROCE_V2_UDP_DPORT BIT(3)
#define MLX4_DISABLE_RX_PORT BIT(18)
static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
int err;
struct mlx4_cmd_mailbox *mailbox;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
int err;
struct mlx4_cmd_mailbox *mailbox;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (!err)
memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
/* Conversion between the HW values and the actual functionality.
 * The HW value is given by the array index, and the resulting
 * functionality is determined by the flags.
 */
static const u8 config_dev_csum_flags[] = {
[0] = 0,
[1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
[2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
MLX4_RX_CSUM_MODE_L4,
[3] = MLX4_RX_CSUM_MODE_L4 |
MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
MLX4_RX_CSUM_MODE_MULTI_VLAN
};
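/* Retrieve the current CONFIG_DEV settings and translate the per-port RX
 * checksum mode fields into MLX4_RX_CSUM_MODE_* flag sets, along with the
 * configured VXLAN UDP destination port. Returns -EOPNOTSUPP if the
 * device does not support CONFIG_DEV.
 */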
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
struct mlx4_config_dev_params *params)
{
struct mlx4_config_dev config_dev = {0};
int err;
u8 csum_mask;
#define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
return -EOPNOTSUPP;
err = mlx4_CONFIG_DEV_get(dev, &config_dev);
if (err)
return err;
csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
CONFIG_DEV_RX_CSUM_MODE_MASK;
if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
return -EINVAL;
params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
CONFIG_DEV_RX_CSUM_MODE_MASK;
if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
return -EINVAL;
params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
struct mlx4_config_dev config_dev;
memset(&config_dev, 0, sizeof(config_dev));
config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
config_dev.vxlan_udp_dport = udp_port;
return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
#define CONFIG_DISABLE_RX_PORT BIT(15)
int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
{
struct mlx4_config_dev config_dev;
memset(&config_dev, 0, sizeof(config_dev));
config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
if (dis)
config_dev.roce_flags =
cpu_to_be32(CONFIG_DISABLE_RX_PORT);
return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port)
{
struct mlx4_config_dev config_dev;
memset(&config_dev, 0, sizeof(config_dev));
config_dev.update_flags = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT);
config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port);
return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port);
int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
{
struct mlx4_cmd_mailbox *mailbox;
struct {
__be32 v_port1;
__be32 v_port2;
} *v2p;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return -ENOMEM;
v2p = mailbox->buf;
v2p->v_port1 = cpu_to_be32(port1);
v2p->v_port2 = cpu_to_be32(port2);
err = mlx4_cmd(dev, mailbox->dma, 0,
MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
/*
* Round up number of system pages needed in case
* MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
*/
*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
return 0;
}
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[],
u32 value[], size_t array_len, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
size_t i;
int ret;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (ret)
goto out;
for (i = 0; i < array_len; i++) {
if (offset[i] > MLX4_MAILBOX_SIZE) {
ret = -EINVAL;
goto out;
}
MLX4_GET(value[i], outbox, offset[i]);
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
EXPORT_SYMBOL(mlx4_query_diag_counters);
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
u8 port;
u32 *outbox;
struct mlx4_cmd_mailbox *mailbox;
u32 in_mod;
u32 guid_hi, guid_lo;
int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	0x14
#define MOD_STAT_CFG_GUID_L	0x1c
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
for (port = 1; port <= dev->caps.num_ports; port++) {
in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
mlx4_err(dev, "Fail to get port %d uplink guid\n",
port);
ret = err;
} else {
MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
dev->caps.phys_port_id[port] = (u64)guid_lo |
(u64)guid_hi << 32;
}
}
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
enum {
ADD_TO_MCG = 0x26,
};
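/* Work handler for firmware-initiated operation requests (GET_OP_REQ).
 * Each pending request is fetched, handled (currently only ADD_TO_MCG,
 * i.e. attaching or detaching QPs for a multicast group) and then
 * acknowledged back to firmware together with its status.
 */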
void mlx4_opreq_action(struct work_struct *work)
{
struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
opreq_task);
struct mlx4_dev *dev = &priv->dev;
int num_tasks = atomic_read(&priv->opreq_count);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 *outbox;
u32 modifier;
u16 token;
u16 type;
int err;
u32 num_qps;
struct mlx4_qp qp;
int i;
u8 rem_mcg;
u8 prot;
#define GET_OP_REQ_MODIFIER_OFFSET 0x08
#define GET_OP_REQ_TOKEN_OFFSET 0x14
#define GET_OP_REQ_TYPE_OFFSET 0x1a
#define GET_OP_REQ_DATA_OFFSET 0x20
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
return;
}
outbox = mailbox->buf;
while (num_tasks) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
mlx4_err(dev, "Failed to retrieve required operation: %d\n",
err);
goto out;
}
MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
type &= 0xfff;
switch (type) {
case ADD_TO_MCG:
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
err = EPERM;
break;
}
mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
GET_OP_REQ_DATA_OFFSET);
num_qps = be32_to_cpu(mgm->members_count) &
MGM_QPN_MASK;
rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
prot = ((u8 *)(&mgm->members_count))[0] >> 6;
for (i = 0; i < num_qps; i++) {
qp.qpn = be32_to_cpu(mgm->qp[i]);
if (rem_mcg)
err = mlx4_multicast_detach(dev, &qp,
mgm->gid,
prot, 0);
else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
if (err)
break;
}
break;
default:
mlx4_warn(dev, "Bad type for required operation\n");
err = EINVAL;
break;
}
err = mlx4_cmd(dev, 0, ((u32) err |
(__force u32)cpu_to_be32(token) << 16),
1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
mlx4_err(dev, "Failed to acknowledge required request: %d\n",
err);
goto out;
}
memset(outbox, 0, 0xffc);
num_tasks = atomic_dec_return(&priv->opreq_count);
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
}
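/* Parse the MAD_DEMUX query output: if all four SMP attribute masks
 * (set, getresp, trap, trap repress) are non-zero, the firmware SMP
 * firewall is considered active and the device is put into secure-host
 * mode by the caller.
 */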
static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
u32 set_attr_mask, getresp_attr_mask;
u32 trap_attr_mask, traprepress_attr_mask;
MLX4_GET(set_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
set_attr_mask);
MLX4_GET(getresp_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
getresp_attr_mask);
MLX4_GET(trap_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
trap_attr_mask);
MLX4_GET(traprepress_attr_mask, mailbox->buf,
MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
traprepress_attr_mask);
if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
traprepress_attr_mask)
return 1;
return 0;
}
int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
int err;
/* Check if mad_demux is supported */
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX\n");
return -ENOMEM;
}
	/* Query mad_demux to find out which MADs are handled by the internal SMA */
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
err);
goto out;
}
if (mlx4_check_smp_firewall_active(dev, mailbox))
dev->flags |= MLX4_FLAG_SECURE_HOST;
/* Config mad_demux to handle all MADs returned by the query above */
err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err) {
mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
goto out;
}
if (dev->flags & MLX4_FLAG_SECURE_HOST)
mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
/* Access Reg commands */
enum mlx4_access_reg_masks {
MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};
struct mlx4_access_reg {
__be16 constant1;
u8 status;
u8 resrvd1;
__be16 reg_id;
u8 method;
u8 constant2;
__be32 resrvd2[2];
__be16 len_const;
__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));
/**
* mlx4_ACCESS_REG - Generic access reg command.
* @dev: mlx4_dev.
* @reg_id: register ID to access.
* @method: Access method Read/Write.
* @reg_len: register length to Read/Write in bytes.
* @reg_data: reg_data pointer to Read/Write From/To.
*
* Access ConnectX registers FW command.
* Returns 0 on success and copies outbox mlx4_access_reg data
* field into reg_data or a negative error code.
*/
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
enum mlx4_access_reg_method method,
u16 reg_len, void *reg_data)
{
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_access_reg *inbuf, *outbuf;
int err;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
return PTR_ERR(outbox);
}
inbuf = inbox->buf;
outbuf = outbox->buf;
inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
inbuf->constant2 = 0x1;
inbuf->reg_id = cpu_to_be16(reg_id);
inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
inbuf->len_const =
cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
((0x3) << 12));
memcpy(inbuf->reg_data, reg_data, reg_len);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_WRAPPED);
if (err)
goto out;
if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
mlx4_err(dev,
"MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
reg_id, err);
goto out;
}
memcpy(reg_data, outbuf->reg_data, reg_len);
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return err;
}
/* ConnectX registers IDs */
enum mlx4_reg_id {
MLX4_REG_ID_PTYS = 0x5004,
};
/**
* mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
* register
* @dev: mlx4_dev.
* @method: Access method Read/Write.
* @ptys_reg: PTYS register data pointer.
*
* Access ConnectX PTYS register, to Read/Write Port Type/Speed
* configuration
* Returns 0 on success or a negative error code.
*/
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
enum mlx4_access_reg_method method,
struct mlx4_ptys_reg *ptys_reg)
{
return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
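/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): querying the PTYS register for a port through the wrapper
 * above. Only local_port is filled in by the caller; the remaining fields
 * are returned by firmware and can be inspected after the call. The dev
 * pointer and port number are assumed to come from the caller.
 */
static inline int example_query_ptys(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_ptys_reg ptys_reg;
	int err;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;

	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (err)
		return err;

	/* On success, ptys_reg now holds the port type/speed fields
	 * reported by firmware.
	 */
	return 0;
}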
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct mlx4_access_reg *inbuf = inbox->buf;
u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
u16 reg_id = be16_to_cpu(inbuf->reg_id);
if (slave != mlx4_master_func_num(dev) &&
method == MLX4_ACCESS_REG_WRITE)
return -EPERM;
if (reg_id == MLX4_REG_ID_PTYS) {
struct mlx4_ptys_reg *ptys_reg =
(struct mlx4_ptys_reg *)inbuf->reg_data;
ptys_reg->local_port =
mlx4_slave_convert_port(dev, slave,
ptys_reg->local_port);
}
return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
}
static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
{
#define SET_PORT_GEN_PHV_VALID 0x10
#define SET_PORT_GEN_PHV_EN 0x80
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->flags2 |= SET_PORT_GEN_PHV_VALID;
if (phv_bit)
context->phv_en |= SET_PORT_GEN_PHV_EN;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
{
int err;
struct mlx4_func_cap func_cap;
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
*phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
return err;
}
EXPORT_SYMBOL(get_phv_bit);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
{
int ret;
if (mlx4_is_slave(dev))
return -EPERM;
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
if (!ret)
dev->caps.phv_bit[port] = new_val;
return ret;
}
return -EOPNOTSUPP;
}
EXPORT_SYMBOL(set_phv_bit);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled)
{
struct mlx4_func_cap func_cap;
int err;
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
*vlan_offload_disabled =
!!(func_cap.flags0 &
QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
return err;
}
EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
int i;
u8 mac_addr[ETH_ALEN];
dev->port_random_macs = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
if (!dev->caps.def_mac[i] &&
dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
eth_random_addr(mac_addr);
dev->port_random_macs |= 1 << i;
dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr);
}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/fw.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include "mlx4.h"
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
u32 obj;
spin_lock(&bitmap->lock);
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
obj = find_first_zero_bit(bitmap->table, bitmap->max);
}
if (obj < bitmap->max) {
set_bit(obj, bitmap->table);
bitmap->last = (obj + 1);
if (bitmap->last == bitmap->max)
bitmap->last = 0;
obj |= bitmap->top;
} else
obj = -1;
if (obj != -1)
--bitmap->avail;
spin_unlock(&bitmap->lock);
return obj;
}
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr)
{
mlx4_bitmap_free_range(bitmap, obj, 1, use_rr);
}
static unsigned long find_aligned_range(unsigned long *bitmap,
u32 start, u32 nbits,
int len, int align, u32 skip_mask)
{
unsigned long end, i;
again:
start = ALIGN(start, align);
while ((start < nbits) && (test_bit(start, bitmap) ||
(start & skip_mask)))
start += align;
if (start >= nbits)
return -1;
end = start+len;
if (end > nbits)
return -1;
for (i = start + 1; i < end; i++) {
if (test_bit(i, bitmap) || ((u32)i & skip_mask)) {
start = i + 1;
goto again;
}
}
return start;
}
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
int align, u32 skip_mask)
{
u32 obj;
if (likely(cnt == 1 && align == 1 && !skip_mask))
return mlx4_bitmap_alloc(bitmap);
spin_lock(&bitmap->lock);
obj = find_aligned_range(bitmap->table, bitmap->last,
bitmap->max, cnt, align, skip_mask);
if (obj >= bitmap->max) {
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
obj = find_aligned_range(bitmap->table, 0, bitmap->max,
cnt, align, skip_mask);
}
if (obj < bitmap->max) {
bitmap_set(bitmap->table, obj, cnt);
if (obj == bitmap->last) {
bitmap->last = (obj + cnt);
if (bitmap->last >= bitmap->max)
bitmap->last = 0;
}
obj |= bitmap->top;
} else
obj = -1;
if (obj != -1)
bitmap->avail -= cnt;
spin_unlock(&bitmap->lock);
return obj;
}
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
return bitmap->avail;
}
static u32 mlx4_bitmap_masked_value(struct mlx4_bitmap *bitmap, u32 obj)
{
return obj & (bitmap->max + bitmap->reserved_top - 1);
}
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
int use_rr)
{
obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
if (!use_rr) {
bitmap->last = min(bitmap->last, obj);
bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
& bitmap->mask;
}
bitmap_clear(bitmap->table, obj, cnt);
bitmap->avail += cnt;
spin_unlock(&bitmap->lock);
}
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
/* num must be a power of 2 */
if (num != roundup_pow_of_two(num))
return -EINVAL;
bitmap->last = 0;
bitmap->top = 0;
bitmap->max = num - reserved_top;
bitmap->mask = mask;
bitmap->reserved_top = reserved_top;
bitmap->avail = num - reserved_top - reserved_bot;
bitmap->effective_len = bitmap->avail;
spin_lock_init(&bitmap->lock);
bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
bitmap_set(bitmap->table, 0, reserved_bot);
return 0;
}
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
bitmap_free(bitmap->table);
}
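/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): a round trip through the bitmap allocator. The object count
 * must be a power of two, as enforced by mlx4_bitmap_init() above; the
 * sizes and reserved ranges below are arbitrary.
 */
static inline int example_bitmap_round_trip(void)
{
	struct mlx4_bitmap bitmap;
	u32 obj;
	int err;

	/* 256 objects, mask 255, 8 reserved at the bottom, none at the top */
	err = mlx4_bitmap_init(&bitmap, 256, 255, 8, 0);
	if (err)
		return err;

	obj = mlx4_bitmap_alloc(&bitmap);	/* first free object, >= 8 */
	if (obj != (u32)-1)
		mlx4_bitmap_free(&bitmap, obj, 0);

	mlx4_bitmap_cleanup(&bitmap);
	return 0;
}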
struct mlx4_zone_allocator {
struct list_head entries;
struct list_head prios;
u32 last_uid;
u32 mask;
/* protect the zone_allocator from concurrent accesses */
spinlock_t lock;
enum mlx4_zone_alloc_flags flags;
};
struct mlx4_zone_entry {
struct list_head list;
struct list_head prio_list;
u32 uid;
struct mlx4_zone_allocator *allocator;
struct mlx4_bitmap *bitmap;
int use_rr;
int priority;
int offset;
enum mlx4_zone_flags flags;
};
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags)
{
struct mlx4_zone_allocator *zones = kmalloc(sizeof(*zones), GFP_KERNEL);
if (NULL == zones)
return NULL;
INIT_LIST_HEAD(&zones->entries);
INIT_LIST_HEAD(&zones->prios);
spin_lock_init(&zones->lock);
zones->last_uid = 0;
zones->mask = 0;
zones->flags = flags;
return zones;
}
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
struct mlx4_bitmap *bitmap,
u32 flags,
int priority,
int offset,
u32 *puid)
{
u32 mask = mlx4_bitmap_masked_value(bitmap, (u32)-1);
struct mlx4_zone_entry *it;
struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);
if (NULL == zone)
return -ENOMEM;
zone->flags = flags;
zone->bitmap = bitmap;
zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
zone->priority = priority;
zone->offset = offset;
spin_lock(&zone_alloc->lock);
zone->uid = zone_alloc->last_uid++;
zone->allocator = zone_alloc;
if (zone_alloc->mask < mask)
zone_alloc->mask = mask;
list_for_each_entry(it, &zone_alloc->prios, prio_list)
if (it->priority >= priority)
break;
if (&it->prio_list == &zone_alloc->prios || it->priority > priority)
list_add_tail(&zone->prio_list, &it->prio_list);
list_add_tail(&zone->list, &it->list);
spin_unlock(&zone_alloc->lock);
*puid = zone->uid;
return 0;
}
/* Should be called under a lock */
static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
{
struct mlx4_zone_allocator *zone_alloc = entry->allocator;
if (!list_empty(&entry->prio_list)) {
/* Check if we need to add an alternative node to the prio list */
if (!list_is_last(&entry->list, &zone_alloc->entries)) {
struct mlx4_zone_entry *next = list_first_entry(&entry->list,
typeof(*next),
list);
if (next->priority == entry->priority)
list_add_tail(&next->prio_list, &entry->prio_list);
}
list_del(&entry->prio_list);
}
list_del(&entry->list);
if (zone_alloc->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP) {
u32 mask = 0;
struct mlx4_zone_entry *it;
list_for_each_entry(it, &zone_alloc->prios, prio_list) {
u32 cur_mask = mlx4_bitmap_masked_value(it->bitmap, (u32)-1);
if (mask < cur_mask)
mask = cur_mask;
}
zone_alloc->mask = mask;
}
}
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
{
struct mlx4_zone_entry *zone, *tmp;
spin_lock(&zone_alloc->lock);
list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
list_del(&zone->list);
list_del(&zone->prio_list);
kfree(zone);
}
spin_unlock(&zone_alloc->lock);
kfree(zone_alloc);
}
/* Should be called under a lock */
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
int align, u32 skip_mask, u32 *puid)
{
u32 uid = 0;
u32 res;
struct mlx4_zone_allocator *zone_alloc = zone->allocator;
struct mlx4_zone_entry *curr_node;
res = mlx4_bitmap_alloc_range(zone->bitmap, count,
align, skip_mask);
if (res != (u32)-1) {
res += zone->offset;
uid = zone->uid;
goto out;
}
list_for_each_entry(curr_node, &zone_alloc->prios, prio_list) {
if (unlikely(curr_node->priority == zone->priority))
break;
}
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
struct mlx4_zone_entry *it = curr_node;
list_for_each_entry_continue_reverse(it, &zone_alloc->entries, list) {
res = mlx4_bitmap_alloc_range(it->bitmap, count,
align, skip_mask);
if (res != (u32)-1) {
res += it->offset;
uid = it->uid;
goto out;
}
}
}
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
struct mlx4_zone_entry *it = curr_node;
list_for_each_entry_from(it, &zone_alloc->entries, list) {
if (unlikely(it == zone))
continue;
if (unlikely(it->priority != curr_node->priority))
break;
res = mlx4_bitmap_alloc_range(it->bitmap, count,
align, skip_mask);
if (res != (u32)-1) {
res += it->offset;
uid = it->uid;
goto out;
}
}
}
if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
if (list_is_last(&curr_node->prio_list, &zone_alloc->prios))
goto out;
curr_node = list_first_entry(&curr_node->prio_list,
typeof(*curr_node),
prio_list);
list_for_each_entry_from(curr_node, &zone_alloc->entries, list) {
res = mlx4_bitmap_alloc_range(curr_node->bitmap, count,
align, skip_mask);
if (res != (u32)-1) {
res += curr_node->offset;
uid = curr_node->uid;
goto out;
}
}
}
out:
if (NULL != puid && res != (u32)-1)
*puid = uid;
return res;
}
/* Should be called under a lock */
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
u32 count)
{
mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
}
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid(
struct mlx4_zone_allocator *zones, u32 uid)
{
struct mlx4_zone_entry *zone;
list_for_each_entry(zone, &zones->entries, list) {
if (zone->uid == uid)
return zone;
}
return NULL;
}
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid)
{
struct mlx4_zone_entry *zone;
struct mlx4_bitmap *bitmap;
spin_lock(&zones->lock);
zone = __mlx4_find_zone_by_uid(zones, uid);
bitmap = zone == NULL ? NULL : zone->bitmap;
spin_unlock(&zones->lock);
return bitmap;
}
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
{
struct mlx4_zone_entry *zone;
int res = 0;
spin_lock(&zones->lock);
zone = __mlx4_find_zone_by_uid(zones, uid);
if (NULL == zone) {
res = -1;
goto out;
}
__mlx4_zone_remove_one_entry(zone);
out:
spin_unlock(&zones->lock);
kfree(zone);
return res;
}
/* Should be called under a lock */
static struct mlx4_zone_entry *__mlx4_find_zone_by_uid_unique(
struct mlx4_zone_allocator *zones, u32 obj)
{
struct mlx4_zone_entry *zone, *zone_candidate = NULL;
u32 dist = (u32)-1;
/* Search for the smallest zone that this obj could be
* allocated from. This is done in order to handle
* situations when small bitmaps are allocated from bigger
* bitmaps (and the allocated space is marked as reserved in
	 * the bigger bitmap).
*/
list_for_each_entry(zone, &zones->entries, list) {
if (obj >= zone->offset) {
u32 mobj = (obj - zone->offset) & zones->mask;
if (mobj < zone->bitmap->max) {
u32 curr_dist = zone->bitmap->effective_len;
if (curr_dist < dist) {
dist = curr_dist;
zone_candidate = zone;
}
}
}
}
return zone_candidate;
}
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
int align, u32 skip_mask, u32 *puid)
{
struct mlx4_zone_entry *zone;
int res = -1;
spin_lock(&zones->lock);
zone = __mlx4_find_zone_by_uid(zones, uid);
if (NULL == zone)
goto out;
res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
out:
spin_unlock(&zones->lock);
return res;
}
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
int res = 0;
spin_lock(&zones->lock);
zone = __mlx4_find_zone_by_uid(zones, uid);
if (NULL == zone) {
res = -1;
goto out;
}
__mlx4_free_from_zone(zone, obj, count);
out:
spin_unlock(&zones->lock);
return res;
}
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count)
{
struct mlx4_zone_entry *zone;
int res;
if (!(zones->flags & MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP))
return -EFAULT;
spin_lock(&zones->lock);
zone = __mlx4_find_zone_by_uid_unique(zones, obj);
if (NULL == zone) {
res = -1;
goto out;
}
__mlx4_free_from_zone(zone, obj, count);
res = 0;
out:
spin_unlock(&zones->lock);
return res;
}
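/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): wiring an already-initialized bitmap into a zone allocator and
 * allocating one entry from it by zone uid. The priority/offset values
 * are arbitrary; the bitmap is assumed to come from mlx4_bitmap_init().
 */
static inline int example_zone_round_trip(struct mlx4_bitmap *bitmap)
{
	struct mlx4_zone_allocator *zones;
	u32 uid, puid;
	u32 obj;
	int err;

	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (!zones)
		return -ENOMEM;

	err = mlx4_zone_add_one(zones, bitmap, MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO,
				0 /* priority */, 0 /* offset */, &uid);
	if (err)
		goto out;

	obj = mlx4_zone_alloc_entries(zones, uid, 1, 1, 0, &puid);
	if (obj == (u32)-1) {
		err = -ENOMEM;
		goto out;
	}
	mlx4_zone_free_entries(zones, puid, obj, 1);
out:
	mlx4_zone_allocator_destroy(zones);
	return err;
}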
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
struct mlx4_buf *buf)
{
dma_addr_t t;
buf->nbufs = 1;
buf->npages = 1;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
buf->direct.map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
buf->npages *= 2;
}
return 0;
}
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the
* requested size is > max_direct, we split the allocation into
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
struct mlx4_buf *buf)
{
if (size <= max_direct) {
return mlx4_buf_direct_alloc(dev, size, buf);
} else {
dma_addr_t t;
int i;
buf->direct.buf = NULL;
buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE);
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_alloc_coherent(&dev->persist->pdev->dev,
PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
}
}
return 0;
err_free:
mlx4_buf_free(dev, size, buf);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
if (buf->nbufs == 1) {
dma_free_coherent(&dev->persist->pdev->dev, size,
buf->direct.buf, buf->direct.map);
} else {
int i;
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
dma_free_coherent(&dev->persist->pdev->dev,
PAGE_SIZE,
buf->page_list[i].buf,
buf->page_list[i].map);
kfree(buf->page_list);
}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
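/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): allocating a queue buffer and releasing it again. With
 * max_direct == PAGE_SIZE, anything larger than one page is built from a
 * page list rather than one contiguous DMA allocation, as described in
 * the comment above mlx4_buf_alloc().
 */
static inline int example_buf_round_trip(struct mlx4_dev *dev, int size)
{
	struct mlx4_buf buf;
	int err;

	err = mlx4_buf_alloc(dev, size, PAGE_SIZE, &buf);
	if (err)
		return err;

	/* ... fill the buffer / hand it to the HCA via an MTT ... */

	mlx4_buf_free(dev, size, &buf);
	return 0;
}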
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
&pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
}
return pgdir;
}
static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
struct mlx4_db *db, int order)
{
int o;
int i;
for (o = order; o <= 1; ++o) {
i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
if (i < MLX4_DB_PER_PAGE >> o)
goto found;
}
return -ENOMEM;
found:
clear_bit(i, pgdir->bits[o]);
i <<= o;
if (o > order)
set_bit(i ^ 1, pgdir->bits[order]);
db->u.pgdir = pgdir;
db->index = i;
db->db = pgdir->db_page + db->index;
db->dma = pgdir->db_dma + db->index * 4;
db->order = order;
return 0;
}
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
int ret = 0;
mutex_lock(&priv->pgdir_mutex);
list_for_each_entry(pgdir, &priv->pgdir_list, list)
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
if (!pgdir) {
ret = -ENOMEM;
goto out;
}
list_add(&pgdir->list, &priv->pgdir_list);
/* This should never fail -- we just allocated an empty page: */
WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
out:
mutex_unlock(&priv->pgdir_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int o;
int i;
mutex_lock(&priv->pgdir_mutex);
o = db->order;
i = db->index;
if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
clear_bit(i ^ 1, db->u.pgdir->order0);
++o;
}
i >>= o;
set_bit(i, db->u.pgdir->bits[o]);
if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
kfree(db->u.pgdir);
}
mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);
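/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): allocating a doorbell record from the per-device page
 * directory and freeing it again. Order 0 requests a single doorbell
 * dword; order 1 requests a naturally aligned pair.
 */
static inline int example_db_round_trip(struct mlx4_dev *dev)
{
	struct mlx4_db db;
	int err;

	err = mlx4_db_alloc(dev, &db, 0);
	if (err)
		return err;

	*db.db = 0;	/* the caller starts the doorbell record at zero */

	mlx4_db_free(dev, &db);
	return 0;
}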
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size)
{
int err;
err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
if (err)
goto err_db;
err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
&wqres->mtt);
if (err)
goto err_buf;
err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
return 0;
err_mtt:
mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
mlx4_buf_free(dev, size, &wqres->buf);
err_db:
mlx4_db_free(dev, &wqres->db);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int size)
{
mlx4_mtt_cleanup(dev, &wqres->mtt);
mlx4_buf_free(dev, size, &wqres->buf);
mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
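/* Illustrative usage sketch (editor's addition, not part of the upstream
 * driver): a full work-queue resource round trip. mlx4_alloc_hwq_res()
 * bundles the doorbell, buffer and MTT allocations shown above into one
 * call; the size is in bytes and is assumed to come from the caller.
 */
static inline int example_hwq_round_trip(struct mlx4_dev *dev, int size)
{
	struct mlx4_hwq_resources wqres;
	int err;

	err = mlx4_alloc_hwq_res(dev, &wqres, size);
	if (err)
		return err;

	/* ... pass wqres.mtt / wqres.db to the queue creation command ... */

	mlx4_free_hwq_res(dev, &wqres, size);
	return 0;
}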
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/alloc.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include "mlx4.h"
int mlx4_reset(struct mlx4_dev *dev)
{
void __iomem *reset;
u32 *hca_header = NULL;
int pcie_cap;
u16 devctl;
u16 linkctl;
u16 vendor;
unsigned long end;
u32 sem;
int i;
int err = 0;
#define MLX4_RESET_BASE 0xf0000
#define MLX4_RESET_SIZE 0x400
#define MLX4_SEM_OFFSET 0x3fc
#define MLX4_RESET_OFFSET 0x10
#define MLX4_RESET_VALUE swab32(1)
#define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ)
#define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ)
/*
* Reset the chip. This is somewhat ugly because we have to
* save off the PCI header before reset and then restore it
* after the chip reboots. We skip config space offsets 22
* and 23 since those have a special meaning.
*/
/* Do we need to save off the full 4K PCI Express header?? */
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
goto out;
}
pcie_cap = pci_pcie_cap(dev->persist->pdev);
for (i = 0; i < 64; ++i) {
if (i == 22 || i == 23)
continue;
if (pci_read_config_dword(dev->persist->pdev, i * 4,
hca_header + i)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
MLX4_RESET_BASE,
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out;
}
/* grab HW semaphore to lock out flash updates */
end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES;
do {
sem = readl(reset + MLX4_SEM_OFFSET);
if (!sem)
break;
msleep(1);
} while (time_before(jiffies, end));
if (sem) {
mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n");
err = -EAGAIN;
iounmap(reset);
goto out;
}
/* actually hit reset */
writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET);
iounmap(reset);
/* Docs say to wait one second before accessing device */
msleep(1000);
end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
do {
if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
&vendor) && vendor != 0xffff)
break;
msleep(1);
} while (time_before(jiffies, end));
if (vendor == 0xffff) {
err = -ENODEV;
mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
goto out;
}
/* Now restore the PCI headers */
if (pcie_cap) {
devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
if (pcie_capability_write_word(dev->persist->pdev,
PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->persist->pdev,
PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
goto out;
}
}
for (i = 0; i < 16; ++i) {
if (i * 4 == PCI_COMMAND)
continue;
if (pci_write_config_dword(dev->persist->pdev, i * 4,
hca_header[i])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
i);
goto out;
}
}
if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
goto out;
}
out:
kfree(hca_header);
return err;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/reset.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <asm/io.h>
#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL
#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
CMD_STAT_OK = 0x00,
/* Internal error (such as a bus error) occurred while processing command: */
CMD_STAT_INTERNAL_ERR = 0x01,
/* Operation/command not supported or opcode modifier not supported: */
CMD_STAT_BAD_OP = 0x02,
/* Parameter not supported or parameter out of range: */
CMD_STAT_BAD_PARAM = 0x03,
/* System not enabled or bad system state: */
CMD_STAT_BAD_SYS_STATE = 0x04,
	/* Attempt to access reserved or unallocated resource: */
CMD_STAT_BAD_RESOURCE = 0x05,
/* Requested resource is currently executing a command, or is otherwise busy: */
CMD_STAT_RESOURCE_BUSY = 0x06,
/* Required capability exceeds device limits: */
CMD_STAT_EXCEED_LIM = 0x08,
/* Resource is not in the appropriate state or ownership: */
CMD_STAT_BAD_RES_STATE = 0x09,
/* Index out of range: */
CMD_STAT_BAD_INDEX = 0x0a,
/* FW image corrupted: */
CMD_STAT_BAD_NVMEM = 0x0b,
/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
CMD_STAT_ICM_ERROR = 0x0c,
/* Attempt to modify a QP/EE which is not in the presumed state: */
CMD_STAT_BAD_QP_STATE = 0x10,
/* Bad segment parameters (Address/Size): */
CMD_STAT_BAD_SEG_PARAM = 0x20,
	/* Memory Region has Memory Windows bound to it: */
CMD_STAT_REG_BOUND = 0x21,
/* HCA local attached memory not present: */
CMD_STAT_LAM_NOT_PRE = 0x22,
/* Bad management packet (silently discarded): */
CMD_STAT_BAD_PKT = 0x30,
/* More outstanding CQEs in CQ than new CQ size: */
CMD_STAT_BAD_SIZE = 0x40,
/* Multi Function device support required: */
CMD_STAT_MULTI_FUNC_REQ = 0x50,
};
enum {
HCR_IN_PARAM_OFFSET = 0x00,
HCR_IN_MODIFIER_OFFSET = 0x08,
HCR_OUT_PARAM_OFFSET = 0x0c,
HCR_TOKEN_OFFSET = 0x14,
HCR_STATUS_OFFSET = 0x18,
HCR_OPMOD_SHIFT = 12,
HCR_T_BIT = 21,
HCR_E_BIT = 22,
HCR_GO_BIT = 23
};
enum {
GO_BIT_TIMEOUT_MSECS = 10000
};
enum mlx4_vlan_transition {
MLX4_VLAN_TRANSITION_VST_VST = 0,
MLX4_VLAN_TRANSITION_VST_VGT = 1,
MLX4_VLAN_TRANSITION_VGT_VST = 2,
MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
struct completion done;
int result;
int next;
u64 out_param;
u16 token;
u8 fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
[CMD_STAT_INTERNAL_ERR] = -EIO,
[CMD_STAT_BAD_OP] = -EPERM,
[CMD_STAT_BAD_PARAM] = -EINVAL,
[CMD_STAT_BAD_SYS_STATE] = -ENXIO,
[CMD_STAT_BAD_RESOURCE] = -EBADF,
[CMD_STAT_RESOURCE_BUSY] = -EBUSY,
[CMD_STAT_EXCEED_LIM] = -ENOMEM,
[CMD_STAT_BAD_RES_STATE] = -EBADF,
[CMD_STAT_BAD_INDEX] = -EBADF,
[CMD_STAT_BAD_NVMEM] = -EFAULT,
[CMD_STAT_ICM_ERROR] = -ENFILE,
[CMD_STAT_BAD_QP_STATE] = -EINVAL,
[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
[CMD_STAT_REG_BOUND] = -EBUSY,
[CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
[CMD_STAT_BAD_PKT] = -EINVAL,
[CMD_STAT_BAD_SIZE] = -ENOMEM,
[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
};
if (status >= ARRAY_SIZE(trans_table) ||
(status != CMD_STAT_OK && trans_table[status] == 0))
return -EIO;
return trans_table[status];
}
static u8 mlx4_errno_to_status(int errno)
{
switch (errno) {
case -EPERM:
return CMD_STAT_BAD_OP;
case -EINVAL:
return CMD_STAT_BAD_PARAM;
case -ENXIO:
return CMD_STAT_BAD_SYS_STATE;
case -EBUSY:
return CMD_STAT_RESOURCE_BUSY;
case -ENOMEM:
return CMD_STAT_EXCEED_LIM;
case -ENFILE:
return CMD_STAT_ICM_ERROR;
default:
return CMD_STAT_INTERNAL_ERR;
}
}
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
u8 op_modifier)
{
switch (op) {
case MLX4_CMD_UNMAP_ICM:
case MLX4_CMD_UNMAP_ICM_AUX:
case MLX4_CMD_UNMAP_FA:
case MLX4_CMD_2RST_QP:
case MLX4_CMD_HW2SW_EQ:
case MLX4_CMD_HW2SW_CQ:
case MLX4_CMD_HW2SW_SRQ:
case MLX4_CMD_HW2SW_MPT:
case MLX4_CMD_CLOSE_HCA:
case MLX4_QP_FLOW_STEERING_DETACH:
case MLX4_CMD_FREE_RES:
case MLX4_CMD_CLOSE_PORT:
return CMD_STAT_OK;
case MLX4_CMD_QP_ATTACH:
/* On Detach case return success */
if (op_modifier == 0)
return CMD_STAT_OK;
return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
default:
return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
}
}
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
/* Any error during the closing commands below is considered fatal */
if (op == MLX4_CMD_CLOSE_HCA ||
op == MLX4_CMD_HW2SW_EQ ||
op == MLX4_CMD_HW2SW_CQ ||
op == MLX4_CMD_2RST_QP ||
op == MLX4_CMD_HW2SW_SRQ ||
op == MLX4_CMD_SYNC_TPT ||
op == MLX4_CMD_UNMAP_ICM ||
op == MLX4_CMD_UNMAP_ICM_AUX ||
op == MLX4_CMD_UNMAP_FA)
return 1;
/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
* CMD_STAT_REG_BOUND.
	 * This status indicates that the memory region has memory windows bound to it,
* which may result from invalid user space usage and is not fatal.
*/
if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
return 1;
return 0;
}
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
*/
if (mlx4_internal_err_reset) {
mlx4_enter_error_state(dev->persist);
err = mlx4_internal_err_ret_value(dev, op, op_modifier);
}
return err;
}
static int comm_pending(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u32 status = readl(&priv->mfunc.comm->slave_read);
return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u32 val;
/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
* check the INTERNAL_ERROR flag which is updated under
* device_state_mutex lock.
*/
mutex_lock(&dev->persist->device_state_mutex);
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
mutex_unlock(&dev->persist->device_state_mutex);
return -EIO;
}
priv->cmd.comm_toggle ^= 1;
val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
__raw_writel((__force u32) cpu_to_be32(val),
&priv->mfunc.comm->slave_write);
mutex_unlock(&dev->persist->device_state_mutex);
return 0;
}
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
unsigned long timeout)
{
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long end;
int err = 0;
int ret_from_pending = 0;
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
/* Write command */
down(&priv->cmd.poll_sem);
if (mlx4_comm_cmd_post(dev, cmd, param)) {
/* Only in case the device state is INTERNAL_ERROR,
* mlx4_comm_cmd_post returns with an error
*/
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
goto out;
}
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
cond_resched();
ret_from_pending = comm_pending(dev);
if (ret_from_pending) {
/* check if the slave is trying to boot in the middle of
		 * the FLR process. The only non-zero result of the RESET
		 * command is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
err = MLX4_DELAY_RESET_SLAVE;
goto out;
} else {
mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
cmd);
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
}
}
if (err)
mlx4_enter_error_state(dev->persist);
out:
up(&priv->cmd.poll_sem);
return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
u16 param, u16 op, unsigned long timeout)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
unsigned long end;
int err = 0;
down(&cmd->event_sem);
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
reinit_completion(&context->done);
if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
/* Only in case the device state is INTERNAL_ERROR,
* mlx4_comm_cmd_post returns with an error
*/
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
goto out;
}
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
vhcr_cmd, op);
goto out_reset;
}
err = context->result;
if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
vhcr_cmd, context->fw_status);
if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
goto out_reset;
}
/* wait for comm channel ready
	 * this is necessary to prevent a race
	 * when switching between event and polling mode.
	 * This section is skipped when the device is in FATAL_ERROR state;
	 * in that state, no commands are sent via the comm channel until
* the device has returned from reset.
*/
if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
end = msecs_to_jiffies(timeout) + jiffies;
while (comm_pending(dev) && time_before(jiffies, end))
cond_resched();
}
goto out;
out_reset:
err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
mlx4_enter_error_state(dev->persist);
out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
spin_unlock(&cmd->context_lock);
up(&cmd->event_sem);
return err;
}
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
u16 op, unsigned long timeout)
{
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
if (mlx4_priv(dev)->cmd.use_events)
return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status;
if (pci_channel_offline(dev->persist->pdev))
return -EIO;
status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
return (status & swab32(1 << HCR_GO_BIT)) ||
(mlx4_priv(dev)->cmd.toggle ==
!!(status & swab32(1 << HCR_T_BIT)));
}
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op, u16 token,
int event)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
u32 __iomem *hcr = cmd->hcr;
int ret = -EIO;
unsigned long end;
mutex_lock(&dev->persist->device_state_mutex);
/* To avoid writing to unknown addresses after the device state was
* changed to internal error and the chip was reset,
* check the INTERNAL_ERROR flag which is updated under
* device_state_mutex lock.
*/
if (pci_channel_offline(dev->persist->pdev) ||
(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
goto out;
}
end = jiffies;
if (event)
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
if (pci_channel_offline(dev->persist->pdev)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
goto out;
}
if (time_after_eq(jiffies, end)) {
mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
}
cond_resched();
}
/*
* We use writel (instead of something like memcpy_toio)
* because writes of less than 32 bits to the HCR don't work
* (and some architectures such as ia64 implement memcpy_toio
* in terms of writeb).
*/
__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
/* __raw_writel may not order writes. */
wmb();
__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
op), hcr + 6);
cmd->toggle = cmd->toggle ^ 1;
ret = 0;
out:
if (ret)
mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
op, ret, in_param, in_modifier, op_modifier);
mutex_unlock(&dev->persist->device_state_mutex);
return ret;
}
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
int ret;
mutex_lock(&priv->cmd.slave_cmd_mutex);
vhcr->in_param = cpu_to_be64(in_param);
vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
vhcr->in_modifier = cpu_to_be32(in_modifier);
vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
vhcr->status = 0;
vhcr->flags = !!(priv->cmd.use_events) << 6;
if (mlx4_is_master(dev)) {
ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
if (!ret) {
if (out_is_imm) {
if (out_param)
*out_param =
be64_to_cpu(vhcr->out_param);
else {
mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
}
if (ret &&
dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
} else {
ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
MLX4_COMM_TIME + timeout);
if (!ret) {
if (out_is_imm) {
if (out_param)
*out_param =
be64_to_cpu(vhcr->out_param);
else {
mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
} else {
if (dev->persist->state &
MLX4_DEVICE_STATE_INTERNAL_ERROR)
ret = mlx4_internal_err_ret_value(dev, op,
op_modifier);
else
mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
}
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
struct mlx4_priv *priv = mlx4_priv(dev);
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
u32 stat;
down(&priv->cmd.poll_sem);
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
err = mlx4_internal_err_ret_value(dev, op, op_modifier);
goto out;
}
if (out_is_imm && !out_param) {
mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
op);
err = -EINVAL;
goto out;
}
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
goto out_reset;
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(dev) && time_before(jiffies, end)) {
if (pci_channel_offline(dev->persist->pdev)) {
/*
* Device is going through error recovery
* and cannot accept commands.
*/
err = -EIO;
goto out_reset;
}
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
err = mlx4_internal_err_ret_value(dev, op, op_modifier);
goto out;
}
cond_resched();
}
if (cmd_pending(dev)) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
err = -EIO;
goto out_reset;
}
if (out_is_imm)
*out_param =
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
stat = be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
err = mlx4_status_to_errno(stat);
if (err) {
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, stat);
if (mlx4_closing_cmd_fatal_error(op, stat))
goto out_reset;
goto out;
}
out_reset:
if (err)
err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
up(&priv->cmd.poll_sem);
return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_context *context =
&priv->cmd.context[token & priv->cmd.token_mask];
/* previously timed out command completing at long last */
if (token != context->token)
return;
context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
complete(&context->done);
}
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
{
struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
struct mlx4_cmd_context *context;
long ret_wait;
int err = 0;
down(&cmd->event_sem);
spin_lock(&cmd->context_lock);
BUG_ON(cmd->free_head < 0);
context = &cmd->context[cmd->free_head];
context->token += cmd->token_mask + 1;
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
if (out_is_imm && !out_param) {
mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
op);
err = -EINVAL;
goto out;
}
reinit_completion(&context->done);
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
if (err)
goto out_reset;
if (op == MLX4_CMD_SENSE_PORT) {
ret_wait =
wait_for_completion_interruptible_timeout(&context->done,
msecs_to_jiffies(timeout));
if (ret_wait < 0) {
context->fw_status = 0;
context->out_param = 0;
context->result = 0;
}
} else {
ret_wait = (long)wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout));
}
if (!ret_wait) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
if (op == MLX4_CMD_NOP) {
err = -EBUSY;
goto out;
} else {
err = -EIO;
goto out_reset;
}
}
err = context->result;
if (err) {
/* Since we do not want to have this error message always
* displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we demote the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to debug level.
*/
if (op == MLX4_CMD_SET_PORT &&
(in_modifier == 1 || in_modifier == 2) &&
op_modifier == MLX4_SET_PORT_IB_OPCODE &&
context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
else
mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
err = mlx4_internal_err_ret_value(dev, op, op_modifier);
else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
goto out_reset;
goto out;
}
if (out_is_imm)
*out_param = context->out_param;
out_reset:
if (err)
err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
spin_lock(&cmd->context_lock);
context->next = cmd->free_head;
cmd->free_head = context - cmd->context;
spin_unlock(&cmd->context_lock);
up(&cmd->event_sem);
return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout, int native)
{
if (pci_channel_offline(dev->persist->pdev))
return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
int ret;
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_internal_err_ret_value(dev, op,
op_modifier);
down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
ret = mlx4_cmd_wait(dev, in_param, out_param,
out_is_imm, in_modifier,
op_modifier, op, timeout);
else
ret = mlx4_cmd_poll(dev, in_param, out_param,
out_is_imm, in_modifier,
op_modifier, op, timeout);
up_read(&mlx4_priv(dev)->cmd.switch_sem);
return ret;
}
return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
int slave, u64 slave_addr,
int size, int is_read)
{
u64 in_param;
u64 out_param;
if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) {
mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
slave_addr, master_addr, slave, size);
return -EINVAL;
}
if (is_read) {
in_param = (u64) slave | slave_addr;
out_param = (u64) dev->caps.function | master_addr;
} else {
in_param = (u64) dev->caps.function | master_addr;
out_param = (u64) slave | slave_addr;
}
return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
MLX4_CMD_ACCESS_MEM,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox)
{
struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
int err;
int i;
if (index & 0x1f)
return -EINVAL;
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err)
return err;
for (i = 0; i < 32; ++i)
pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
return err;
}
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox)
{
int i;
int err;
for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
if (err)
return err;
}
return 0;
}
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32
static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
return IB_PORT_ACTIVE;
else
return IB_PORT_DOWN;
}
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
struct ib_smp *smp = inbox->buf;
u32 index;
u8 port, slave_port;
u8 opcode_modifier;
u16 *table;
int err;
int vidx, pidx;
int network_view;
struct mlx4_priv *priv = mlx4_priv(dev);
struct ib_smp *outsmp = outbox->buf;
__be16 *outtab = (__be16 *)(outsmp->data);
__be32 slave_cap_mask;
__be64 slave_node_guid;
slave_port = vhcr->in_modifier;
port = mlx4_slave_convert_port(dev, slave, slave_port);
/* network-view bit is for driver use only, and should not be passed to FW */
opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
network_view = !!(vhcr->op_modifier & 0x8);
if (smp->base_version == 1 &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->class_version == 1) {
/* host view is paravirtualized */
if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
index = be32_to_cpu(smp->attr_mod);
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
sizeof(*table) * 32, GFP_KERNEL);
if (!table)
return -ENOMEM;
/* need to get the full pkey table because the paravirtualized
* pkeys may be scattered among several pkey blocks.
*/
err = get_full_pkey_table(dev, port, table, inbox, outbox);
if (!err) {
for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
outtab[vidx % 32] = cpu_to_be16(table[pidx]);
}
}
kfree(table);
return err;
}
if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave-specific caps */
				/* do the command */
smp->attr_mod = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
/* modify the response for slaves */
if (!err && slave != mlx4_master_func_num(dev)) {
u8 *state = outsmp->data + PORT_STATE_OFFSET;
*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
}
return err;
}
if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
__be64 guid = mlx4_get_admin_guid(dev, slave,
port);
/* set the PF admin guid to the FW/HW burned
* GUID, if it wasn't yet set
*/
if (slave == 0 && guid == 0) {
smp->attr_mod = 0;
err = mlx4_cmd_box(dev,
inbox->dma,
outbox->dma,
vhcr->in_modifier,
opcode_modifier,
vhcr->op,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err)
return err;
mlx4_set_admin_guid(dev,
*(__be64 *)outsmp->
data, slave, port);
} else {
memcpy(outsmp->data, &guid, 8);
}
/* clean all other gids */
memset(outsmp->data + 8, 0, 56);
return 0;
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
port, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (!err) {
slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
memcpy(outsmp->data + 12, &slave_node_guid, 8);
}
return err;
}
}
}
/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
* These are the MADs used by ib verbs (such as ib_query_gids).
*/
if (slave != mlx4_master_func_num(dev) &&
!mlx4_vf_smi_enabled(dev, slave, port)) {
if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
smp->method == IB_MGMT_METHOD_GET) || network_view) {
mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
slave, smp->mgmt_class, smp->method,
network_view ? "Network" : "Host",
be16_to_cpu(smp->attr_id));
return -EPERM;
}
}
return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
vhcr->in_modifier, opcode_modifier,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
return -EPERM;
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
u64 in_param;
u64 out_param;
int err;
in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
if (cmd->encode_slave_id) {
in_param &= 0xffffffffffffff00ll;
in_param |= slave;
}
err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (cmd->out_is_imm)
vhcr->out_param = out_param;
return err;
}
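/* Table describing, for each command opcode a slave may issue, whether
* it carries an inbox/outbox mailbox, whether the output is an
* immediate value, whether the slave id must be encoded in the input
* parameter, and which wrapper (if any) paravirtualizes it.
*/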
static struct mlx4_cmd_info cmd_info[] = {
{
.opcode = MLX4_CMD_QUERY_FW,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_FW_wrapper
},
{
.opcode = MLX4_CMD_QUERY_HCA,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_QUERY_DEV_CAP,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_DEV_CAP_wrapper
},
{
.opcode = MLX4_CMD_QUERY_FUNC_CAP,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
},
{
.opcode = MLX4_CMD_QUERY_ADAPTER,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_INIT_PORT,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_INIT_PORT_wrapper
},
{
.opcode = MLX4_CMD_CLOSE_PORT,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CLOSE_PORT_wrapper
},
{
.opcode = MLX4_CMD_QUERY_PORT,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_PORT_wrapper
},
{
.opcode = MLX4_CMD_SET_PORT,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SET_PORT_wrapper
},
{
.opcode = MLX4_CMD_MAP_EQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_MAP_EQ_wrapper
},
{
.opcode = MLX4_CMD_SW2HW_EQ,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_SW2HW_EQ_wrapper
},
{
.opcode = MLX4_CMD_HW_HEALTH_CHECK,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_NOP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_CONFIG_DEV,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CONFIG_DEV_wrapper
},
{
.opcode = MLX4_CMD_ALLOC_RES,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = true,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_ALLOC_RES_wrapper
},
{
.opcode = MLX4_CMD_FREE_RES,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_FREE_RES_wrapper
},
{
.opcode = MLX4_CMD_SW2HW_MPT,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_SW2HW_MPT_wrapper
},
{
.opcode = MLX4_CMD_QUERY_MPT,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_MPT_wrapper
},
{
.opcode = MLX4_CMD_HW2SW_MPT,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_HW2SW_MPT_wrapper
},
{
.opcode = MLX4_CMD_READ_MTT,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_WRITE_MTT,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_WRITE_MTT_wrapper
},
{
.opcode = MLX4_CMD_SYNC_TPT,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
{
.opcode = MLX4_CMD_HW2SW_EQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_HW2SW_EQ_wrapper
},
{
.opcode = MLX4_CMD_QUERY_EQ,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_QUERY_EQ_wrapper
},
{
.opcode = MLX4_CMD_SW2HW_CQ,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_SW2HW_CQ_wrapper
},
{
.opcode = MLX4_CMD_HW2SW_CQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_HW2SW_CQ_wrapper
},
{
.opcode = MLX4_CMD_QUERY_CQ,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_CQ_wrapper
},
{
.opcode = MLX4_CMD_MODIFY_CQ,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = true,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_MODIFY_CQ_wrapper
},
{
.opcode = MLX4_CMD_SW2HW_SRQ,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_SW2HW_SRQ_wrapper
},
{
.opcode = MLX4_CMD_HW2SW_SRQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_HW2SW_SRQ_wrapper
},
{
.opcode = MLX4_CMD_QUERY_SRQ,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_SRQ_wrapper
},
{
.opcode = MLX4_CMD_ARM_SRQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_ARM_SRQ_wrapper
},
{
.opcode = MLX4_CMD_RST2INIT_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = true,
.verify = NULL,
.wrapper = mlx4_RST2INIT_QP_wrapper
},
{
.opcode = MLX4_CMD_INIT2INIT_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_INIT2INIT_QP_wrapper
},
{
.opcode = MLX4_CMD_INIT2RTR_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_INIT2RTR_QP_wrapper
},
{
.opcode = MLX4_CMD_RTR2RTS_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_RTR2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_RTS2RTS_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_RTS2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_SQERR2RTS_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SQERR2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_2ERR_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_RTS2SQD_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_SQD2SQD_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SQD2SQD_QP_wrapper
},
{
.opcode = MLX4_CMD_SQD2RTS_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SQD2RTS_QP_wrapper
},
{
.opcode = MLX4_CMD_2RST_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_2RST_QP_wrapper
},
{
.opcode = MLX4_CMD_QUERY_QP,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_SUSPEND_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_UNSUSPEND_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_GEN_QP_wrapper
},
{
.opcode = MLX4_CMD_UPDATE_QP,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_UPDATE_QP_wrapper
},
{
.opcode = MLX4_CMD_GET_OP_REQ,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper,
},
{
.opcode = MLX4_CMD_ALLOCATE_VPP,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper,
},
{
.opcode = MLX4_CMD_SET_VPORT_QOS,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper,
},
{
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL, /* XXX verify: only demux can do this */
.wrapper = NULL
},
{
.opcode = MLX4_CMD_MAD_IFC,
.has_inbox = true,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_MAD_IFC_wrapper
},
{
.opcode = MLX4_CMD_MAD_DEMUX,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
{
.opcode = MLX4_CMD_QUERY_IF_STAT,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QUERY_IF_STAT_wrapper
},
{
.opcode = MLX4_CMD_ACCESS_REG,
.has_inbox = true,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_ACCESS_REG_wrapper,
},
{
.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper,
},
/* Native multicast commands are not available for guests */
{
.opcode = MLX4_CMD_QP_ATTACH,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QP_ATTACH_wrapper
},
{
.opcode = MLX4_CMD_PROMISC,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_PROMISC_wrapper
},
/* Ethernet specific commands */
{
.opcode = MLX4_CMD_SET_VLAN_FLTR,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SET_VLAN_FLTR_wrapper
},
{
.opcode = MLX4_CMD_SET_MCAST_FLTR,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_SET_MCAST_FLTR_wrapper
},
{
.opcode = MLX4_CMD_DUMP_ETH_STATS,
.has_inbox = false,
.has_outbox = true,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_DUMP_ETH_STATS_wrapper
},
{
.opcode = MLX4_CMD_INFORM_FLR_DONE,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = NULL
},
/* flow steering commands */
{
.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
.has_inbox = true,
.has_outbox = false,
.out_is_imm = true,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
},
{
.opcode = MLX4_QP_FLOW_STEERING_DETACH,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
},
{
.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
{
.opcode = MLX4_CMD_VIRT_PORT_MAP,
.has_inbox = false,
.has_outbox = false,
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
.wrapper = mlx4_CMD_EPERM_wrapper
},
};
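/* Process one virtual HCR command posted by a slave: DMA in the vHCR
* (unless one was passed in), look up the opcode in cmd_info[], copy
* in the inbox, run the verify/wrapper hooks or issue the command to
* firmware directly, then DMA the outbox and vHCR status back to the
* slave.
*/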
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr_cmd *in_vhcr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_info *cmd = NULL;
struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
struct mlx4_vhcr *vhcr;
struct mlx4_cmd_mailbox *inbox = NULL;
struct mlx4_cmd_mailbox *outbox = NULL;
u64 in_param;
u64 out_param;
int ret = 0;
int i;
int err = 0;
/* Create sw representation of Virtual HCR */
vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
if (!vhcr)
return -ENOMEM;
/* DMA in the vHCR */
if (!in_vhcr) {
ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
priv->mfunc.master.slave_state[slave].vhcr_dma,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
if (!(dev->persist->state &
MLX4_DEVICE_STATE_INTERNAL_ERROR))
mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
__func__, ret);
kfree(vhcr);
return ret;
}
}
/* Fill SW VHCR fields */
vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
vhcr->token = be16_to_cpu(vhcr_cmd->token);
vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
/* Lookup command */
for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
if (vhcr->op == cmd_info[i].opcode) {
cmd = &cmd_info[i];
break;
}
}
if (!cmd) {
mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
vhcr->op, slave);
vhcr_cmd->status = CMD_STAT_BAD_PARAM;
goto out_status;
}
/* Read inbox */
if (cmd->has_inbox) {
vhcr->in_param &= INBOX_MASK;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox)) {
vhcr_cmd->status = CMD_STAT_BAD_SIZE;
inbox = NULL;
goto out_status;
}
ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
vhcr->in_param,
MLX4_MAILBOX_SIZE, 1);
if (ret) {
if (!(dev->persist->state &
MLX4_DEVICE_STATE_INTERNAL_ERROR))
mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
__func__, cmd->opcode);
vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
goto out_status;
}
}
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
/* Allocate outbox */
if (cmd->has_outbox) {
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
vhcr_cmd->status = CMD_STAT_BAD_SIZE;
outbox = NULL;
goto out_status;
}
}
/* Execute the command! */
if (cmd->wrapper) {
err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
cmd);
if (cmd->out_is_imm)
vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
} else {
in_param = cmd->has_inbox ? (u64) inbox->dma :
vhcr->in_param;
out_param = cmd->has_outbox ? (u64) outbox->dma :
vhcr->out_param;
err = __mlx4_cmd(dev, in_param, &out_param,
cmd->out_is_imm, vhcr->in_modifier,
vhcr->op_modifier, vhcr->op,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (cmd->out_is_imm) {
vhcr->out_param = out_param;
vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
}
}
if (err) {
if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
if (vhcr->op == MLX4_CMD_ALLOC_RES &&
(vhcr->in_modifier & 0xff) == RES_COUNTER &&
err == -EDQUOT)
mlx4_dbg(dev,
"Unable to allocate counter for slave %d (%d)\n",
slave, err);
else
mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
}
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
}
/* Write outbox if command completed successfully */
if (cmd->has_outbox && !vhcr_cmd->status) {
ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
vhcr->out_param,
MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
if (ret) {
/* If we failed to write back the outbox after the
* command was successfully executed, we must fail this
* slave, as it is now in an undefined state.
*/
if (!(dev->persist->state &
MLX4_DEVICE_STATE_INTERNAL_ERROR))
mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
goto out;
}
}
out_status:
/* DMA back vhcr result */
if (!in_vhcr) {
ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
priv->mfunc.master.slave_state[slave].vhcr_dma,
ALIGN(sizeof(struct mlx4_vhcr),
MLX4_ACCESS_MEM_ALIGN),
MLX4_CMD_WRAPPED);
if (ret)
mlx4_err(dev, "%s:Failed writing vhcr result\n",
__func__);
else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
slave);
}
out:
kfree(vhcr);
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return ret;
}
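/* Try to apply the admin VLAN/QoS/link-state configuration to an
* active VF without requiring a VF restart. The actual QP updates are
* deferred to a work item that iterates over the slave's QPs using
* the UPDATE_QP command.
*/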
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_vport_state *vp_admin;
struct mlx4_vf_immed_vlan_work *work;
struct mlx4_dev *dev = &(priv->dev);
int err;
int admin_vlan_ix = NO_INDX;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.link_state == vp_admin->link_state &&
vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
if (!(priv->mfunc.master.slave_state[slave].active &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
/* even if the UPDATE_QP command isn't supported, we still want
* to set this VF link according to the admin directive
*/
vp_oper->state.link_state = vp_admin->link_state;
return -1;
}
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
vp_admin->default_vlan, vp_admin->default_qos,
vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
return -ENOMEM;
if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan,
&admin_vlan_ix);
if (err) {
kfree(work);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
}
} else {
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
}
/* save original vlan ix and vlan id */
work->orig_vlan_id = vp_oper->state.default_vlan;
work->orig_vlan_ix = vp_oper->vlan_idx;
/* handle new qos */
if (vp_oper->state.default_qos != vp_admin->default_qos)
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
vp_oper->vlan_idx = admin_vlan_ix;
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.qos_vport = vp_admin->qos_vport;
if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
/* iterate over QPs owned by this slave, using UPDATE_QP */
work->port = port;
work->slave = slave;
work->qos = vp_oper->state.default_qos;
work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
work->vlan_proto = vp_oper->state.vlan_proto;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
return 0;
}
static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
{
struct mlx4_qos_manager *port_qos_ctl;
struct mlx4_priv *priv = mlx4_priv(dev);
port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
/* Enable only default prio at PF init routine */
set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
}
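/* Divide the port's available virtual port priorities (VPPs) evenly
* among the enabled QoS priorities and program the resulting
* per-priority allocation into the device.
*/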
static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
{
int i;
int err;
int num_vfs;
u16 available_vpp;
u8 vpp_param[MLX4_NUM_UP];
struct mlx4_qos_manager *port_qos;
struct mlx4_priv *priv = mlx4_priv(dev);
err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
if (err) {
mlx4_info(dev, "Failed query available VPPs\n");
return;
}
port_qos = &priv->mfunc.master.qos_ctl[port];
num_vfs = (available_vpp /
bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
for (i = 0; i < MLX4_NUM_UP; i++) {
if (test_bit(i, port_qos->priority_bm))
vpp_param[i] = num_vfs;
}
err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
if (err) {
mlx4_info(dev, "Failed allocating VPPs\n");
return;
}
/* Query actual allocated VPP, just to make sure */
err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
if (err) {
mlx4_info(dev, "Failed query available VPPs\n");
return;
}
port_qos->num_of_qos_vfs = num_vfs;
mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
for (i = 0; i < MLX4_NUM_UP; i++)
mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
vpp_param[i]);
}
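/* Copy the admin (requested) vport state into the operational state
* for every port the slave may use, registering the default VLAN and
* (when spoof checking is on) the MAC with the hardware.
*/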
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
int p, port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
slave_state->vst_qinq_supported) {
vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
}
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.mac = vp_admin->mac;
vp_oper->state.spoofchk = vp_admin->spoofchk;
vp_oper->state.tx_rate = vp_admin->tx_rate;
vp_oper->state.qos_vport = vp_admin->qos_vport;
vp_oper->state.guid = vp_admin->guid;
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
vp_oper->state.default_vlan = MLX4_VGT;
vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
}
mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
if (vp_admin->spoofchk) {
vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
port,
vp_admin->mac);
if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
mlx4_warn(&priv->dev,
"No mac resources slave %d, port %d\n",
slave, port);
return err;
}
mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
return 0;
}
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
int p, port;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (NO_INDX != vp_oper->vlan_idx) {
__mlx4_unregister_vlan(&priv->dev,
port, vp_oper->state.default_vlan);
vp_oper->vlan_idx = NO_INDX;
}
if (NO_INDX != vp_oper->mac_idx) {
__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
vp_oper->mac_idx = NO_INDX;
}
}
return;
}
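/* Handle a single command received on the comm channel from a slave:
* track the toggle bit, collect the four VHCR address words, process
* VHCR_POST requests and reset the slave on any protocol violation.
*/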
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
u16 param, u8 toggle)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
u32 reply;
u8 is_going_down = 0;
int i;
unsigned long flags;
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) {
mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
toggle, slave);
goto reset_slave;
}
if (cmd == MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
slave_state[slave].event_eq[i].token = 0;
}
/* check if we are in the middle of the FLR process;
* if so, return "retry" status to the slave
*/
if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
goto inform_slave_state;
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, &slave);
/* write the version in the event field */
reply |= mlx4_comm_get_version();
goto reset_slave;
}
/* command from slave in the middle of FLR */
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
slave, cmd);
return;
}
switch (cmd) {
case MLX4_COMM_CMD_VHCR0:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0;
break;
case MLX4_COMM_CMD_VHCR1:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
goto reset_slave;
slave_state[slave].vhcr_dma |= ((u64) param) << 32;
break;
case MLX4_COMM_CMD_VHCR2:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
goto reset_slave;
slave_state[slave].vhcr_dma |= ((u64) param) << 16;
break;
case MLX4_COMM_CMD_VHCR_EN:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
goto reset_slave;
slave_state[slave].vhcr_dma |= param;
if (mlx4_master_activate_admin_state(priv, slave))
goto reset_slave;
slave_state[slave].active = true;
mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, &slave);
break;
case MLX4_COMM_CMD_VHCR_POST:
if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
(slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
slave, cmd, slave_state[slave].last_cmd);
goto reset_slave;
}
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
break;
default:
mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
goto reset_slave;
}
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = cmd;
else
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave);
return;
}
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
return;
reset_slave:
/* cleanup any slave resources */
if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_delete_all_resources_for_slave(dev, slave);
if (cmd != MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
slave, cmd);
/* Turn on internal error, letting the slave reset itself immediately;
* otherwise it might wait until the command times out.
*/
reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
}
spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
if (!slave_state[slave].is_slave_going_down)
slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
/* with the slave in the middle of FLR, no need to clean resources again */
inform_slave_state:
memset(&slave_state[slave].event_eq, 0,
sizeof(struct mlx4_slave_event_eq_info));
__raw_writel((__force u32) cpu_to_be32(reply),
&priv->mfunc.comm[slave].slave_read);
wmb();
}
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
{
struct mlx4_mfunc_master_ctx *master =
container_of(work,
struct mlx4_mfunc_master_ctx,
comm_work);
struct mlx4_mfunc *mfunc =
container_of(master, struct mlx4_mfunc, master);
struct mlx4_priv *priv =
container_of(mfunc, struct mlx4_priv, mfunc);
struct mlx4_dev *dev = &priv->dev;
u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
u32 nmbr_bits;
u32 comm_cmd;
int i, slave;
int toggle;
bool first = true;
int served = 0;
int reported = 0;
u32 slt;
for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
nmbr_bits = dev->persist->num_vfs + 1;
if (++master->next_slave >= nmbr_bits)
master->next_slave = 0;
slave = master->next_slave;
while (true) {
slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave);
if (!first && slave >= master->next_slave)
break;
if (slave == nmbr_bits) {
if (!first)
break;
first = false;
slave = 0;
continue;
}
++reported;
comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
toggle = comm_cmd >> 31;
if (toggle != slt) {
if (master->slave_state[slave].comm_toggle
!= slt) {
pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
slave, slt,
master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle =
slt;
}
mlx4_master_do_cmd(dev, slave,
comm_cmd >> 16 & 0xff,
comm_cmd & 0xffff, toggle);
++served;
}
slave++;
}
if (reported && reported != served)
mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev))
mlx4_warn(dev, "Failed to arm comm channel events\n");
}
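/* Slave side: wait until the read and write toggle bits of the comm
* channel agree, so that the slave starts out synchronized with the
* master. If the channel appears stuck in an unsynced state, reset
* both toggles to zero.
*/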
static int sync_toggles(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u32 wr_toggle;
u32 rd_toggle;
unsigned long end;
wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
if (wr_toggle == 0xffffffff)
end = jiffies + msecs_to_jiffies(30000);
else
end = jiffies + msecs_to_jiffies(5000);
while (time_before(jiffies, end)) {
rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
/* PCI might be offline */
/* If device removal has been requested,
* do not continue retrying.
*/
if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_NOWAIT) {
mlx4_warn(dev,
"communication channel is offline\n");
return -EIO;
}
msleep(100);
wr_toggle = swab32(readl(&priv->mfunc.comm->
slave_write));
continue;
}
if (rd_toggle >> 31 == wr_toggle >> 31) {
priv->cmd.comm_toggle = rd_toggle >> 31;
return 0;
}
cond_resched();
}
/*
* we could reach here if for example the previous VM using this
* function misbehaved and left the channel with unsynced state. We
* should fix this here and give this VM a chance to use a properly
* synced channel
*/
mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
priv->cmd.comm_toggle = 0;
return 0;
}
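/* Set up the multi-function (SR-IOV) command infrastructure: map the
* comm channel, and on the master also allocate per-slave state,
* initialize per-port QoS defaults and start the comm workqueue and
* resource tracker. Slaves only synchronize their comm toggles.
*/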
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state;
int i, j, err, port;
if (mlx4_is_master(dev))
priv->mfunc.comm =
ioremap(pci_resource_start(dev->persist->pdev,
priv->fw.comm_bar) +
priv->fw.comm_base, MLX4_COMM_PAGESIZE);
else
priv->mfunc.comm =
ioremap(pci_resource_start(dev->persist->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr;
}
if (mlx4_is_master(dev)) {
struct mlx4_vf_oper_state *vf_oper;
struct mlx4_vf_admin_state *vf_admin;
priv->mfunc.master.slave_state =
kcalloc(dev->num_slaves,
sizeof(struct mlx4_slave_state),
GFP_KERNEL);
if (!priv->mfunc.master.slave_state)
goto err_comm;
priv->mfunc.master.vf_admin =
kcalloc(dev->num_slaves,
sizeof(struct mlx4_vf_admin_state),
GFP_KERNEL);
if (!priv->mfunc.master.vf_admin)
goto err_comm_admin;
priv->mfunc.master.vf_oper =
kcalloc(dev->num_slaves,
sizeof(struct mlx4_vf_oper_state),
GFP_KERNEL);
if (!priv->mfunc.master.vf_oper)
goto err_comm_oper;
priv->mfunc.master.next_slave = 0;
for (i = 0; i < dev->num_slaves; ++i) {
vf_admin = &priv->mfunc.master.vf_admin[i];
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_write);
__raw_writel((__force u32) 0,
&priv->mfunc.comm[i].slave_read);
for (port = 1; port <= MLX4_MAX_PORTS; port++) {
struct mlx4_vport_state *admin_vport;
struct mlx4_vport_state *oper_vport;
s_state->vlan_filter[port] =
kzalloc(sizeof(struct mlx4_vlan_fltr),
GFP_KERNEL);
if (!s_state->vlan_filter[port]) {
if (--port)
kfree(s_state->vlan_filter[port]);
goto err_slaves;
}
admin_vport = &vf_admin->vport[port];
oper_vport = &vf_oper->vport[port].state;
INIT_LIST_HEAD(&s_state->mcast_filters[port]);
admin_vport->default_vlan = MLX4_VGT;
oper_vport->default_vlan = MLX4_VGT;
admin_vport->qos_vport =
MLX4_VPP_DEFAULT_VPORT;
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
admin_vport->vlan_proto = htons(ETH_P_8021Q);
oper_vport->vlan_proto = htons(ETH_P_8021Q);
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
mlx4_set_random_admin_guid(dev, i, port);
}
spin_lock_init(&s_state->lock);
}
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
for (port = 1; port <= dev->caps.num_ports; port++) {
if (mlx4_is_eth(dev, port)) {
mlx4_set_default_port_qos(dev, port);
mlx4_allocate_port_vpps(dev, port);
}
}
}
memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
INIT_WORK(&priv->mfunc.master.comm_work,
mlx4_master_comm_channel);
INIT_WORK(&priv->mfunc.master.slave_event_work,
mlx4_gen_slave_eqe);
INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
mlx4_master_handle_slave_flr);
spin_lock_init(&priv->mfunc.master.slave_state_lock);
spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
priv->mfunc.master.comm_wq =
create_singlethread_workqueue("mlx4_comm");
if (!priv->mfunc.master.comm_wq)
goto err_slaves;
if (mlx4_init_resource_tracker(dev))
goto err_thread;
} else {
err = sync_toggles(dev);
if (err) {
mlx4_err(dev, "Couldn't sync toggles\n");
goto err_comm;
}
}
return 0;
err_thread:
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
while (i--) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
priv->mfunc.vhcr_dma);
priv->mfunc.vhcr = NULL;
return -ENOMEM;
}
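/* Initialize the command interface: command state, the HCR mapping
* (native functions only), the virtual HCR page (multi-function
* devices) and the mailbox DMA pool. Partial initialization is
* cleaned up via mlx4_cmd_cleanup() on failure.
*/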
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int flags = 0;
if (!priv->cmd.initialized) {
init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
priv->cmd.initialized = 1;
flags |= MLX4_CMD_CLEANUP_STRUCT;
}
if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
mlx4_err(dev, "Couldn't map command register\n");
goto err;
}
flags |= MLX4_CMD_CLEANUP_HCR;
}
if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
if (!priv->mfunc.vhcr)
goto err;
flags |= MLX4_CMD_CLEANUP_VHCR;
}
if (!priv->cmd.pool) {
priv->cmd.pool = dma_pool_create("mlx4_cmd",
&dev->persist->pdev->dev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
if (!priv->cmd.pool)
goto err;
flags |= MLX4_CMD_CLEANUP_POOL;
}
return 0;
err:
mlx4_cmd_cleanup(dev, flags);
return -ENOMEM;
}
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int slave;
u32 slave_read;
/* If the comm channel has not yet been initialized,
* skip reporting the internal error event to all
* the communication channels.
*/
if (!priv->mfunc.comm)
return;
/* Report an internal error event to all
* communication channels.
*/
for (slave = 0; slave < dev->num_slaves; slave++) {
slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
__raw_writel((__force u32)cpu_to_be32(slave_read),
&priv->mfunc.comm[slave].slave_read);
}
}
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, port;
if (mlx4_is_master(dev)) {
destroy_workqueue(priv->mfunc.master.comm_wq);
for (i = 0; i < dev->num_slaves; i++) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
kfree(priv->mfunc.master.slave_state);
kfree(priv->mfunc.master.vf_admin);
kfree(priv->mfunc.master.vf_oper);
dev->num_slaves = 0;
}
iounmap(priv->mfunc.comm);
priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
dma_pool_destroy(priv->cmd.pool);
priv->cmd.pool = NULL;
}
if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
(cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
iounmap(priv->cmd.hcr);
priv->cmd.hcr = NULL;
}
if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
(cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
priv->mfunc.vhcr = NULL;
}
if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
priv->cmd.initialized = 0;
}
/*
* Switch to using events to issue FW commands (can only be called
* after event queue for command events has been initialized).
*/
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
int err = 0;
priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
sizeof(struct mlx4_cmd_context),
GFP_KERNEL);
if (!priv->cmd.context)
return -ENOMEM;
if (mlx4_is_mfunc(dev))
mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
/* To support fatal error flow, initialize all
* cmd contexts to allow simulating completions
* with complete() at any time.
*/
init_completion(&priv->cmd.context[i].done);
}
priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
priv->cmd.token_mask <<= 1)
; /* nothing */
--priv->cmd.token_mask;
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
up_write(&priv->cmd.switch_sem);
if (mlx4_is_mfunc(dev))
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return err;
}
/*
* Switch back to polling (used when shutting down the device)
*/
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
if (mlx4_is_mfunc(dev))
mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
up_write(&priv->cmd.switch_sem);
if (mlx4_is_mfunc(dev))
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
struct mlx4_cmd_mailbox *mailbox;
mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
if (!mailbox)
return ERR_PTR(-ENOMEM);
mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
&mailbox->dma);
if (!mailbox->buf) {
kfree(mailbox);
return ERR_PTR(-ENOMEM);
}
return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
u32 mlx4_comm_get_version(void)
{
return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
vf, dev->persist->num_vfs);
return -EINVAL;
}
return vf+1;
}
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
if (slave < 1 || slave > dev->persist->num_vfs) {
mlx4_err(dev,
"Bad slave number:%d (number of activated slaves: %lu)\n",
slave, dev->num_slaves);
return -EINVAL;
}
return slave - 1;
}
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_context *context;
int i;
spin_lock(&priv->cmd.context_lock);
if (priv->cmd.context) {
for (i = 0; i < priv->cmd.max_cmds; ++i) {
context = &priv->cmd.context[i];
context->fw_status = CMD_STAT_INTERNAL_ERR;
context->result =
mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
complete(&context->done);
}
}
spin_unlock(&priv->cmd.context_lock);
}
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
struct mlx4_active_ports actv_ports;
int vf;
bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
if (slave == 0) {
bitmap_fill(actv_ports.ports, dev->caps.num_ports);
return actv_ports;
}
vf = mlx4_get_vf_indx(dev, slave);
if (vf < 0)
return actv_ports;
bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
dev->caps.num_ports));
return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
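/* Convert a slave-relative port number to the corresponding physical
* port number, based on the set of ports active for this slave.
*/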
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
unsigned n;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
if (port <= 0 || port > m)
return -EINVAL;
n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
if (port <= n)
port = n + 1;
return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
if (test_bit(port - 1, actv_ports.ports))
return port -
find_first_bit(actv_ports.ports, dev->caps.num_ports);
return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
int port)
{
unsigned i;
struct mlx4_slaves_pport slaves_pport;
bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
if (port <= 0 || port > dev->caps.num_ports)
return slaves_pport;
for (i = 0; i < dev->persist->num_vfs + 1; i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, i);
if (test_bit(port - 1, actv_ports.ports))
set_bit(i, slaves_pport.slaves);
}
return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
struct mlx4_dev *dev,
const struct mlx4_active_ports *crit_ports)
{
unsigned i;
struct mlx4_slaves_pport slaves_pport;
bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
for (i = 0; i < dev->persist->num_vfs + 1; i++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, i);
if (bitmap_equal(crit_ports->ports, actv_ports.ports,
dev->caps.num_ports))
set_bit(i, slaves_pport.slaves);
}
return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
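/* Clamp a port number into the range of ports that are actually
* active for the given slave.
*/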
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+ 1;
int max_port = min_port +
bitmap_weight(actv_ports.ports, dev->caps.num_ports);
if (port < min_port)
port = min_port;
else if (port >= max_port)
port = max_port - 1;
return port;
}
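/* Program per-priority rate limiting for a VF's QoS vport: start from
* the default values queried from Vport 0 and enable max_avg_bw on
* every priority that is active for the port.
*/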
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
int max_tx_rate)
{
int i;
int err;
struct mlx4_qos_manager *port_qos;
struct mlx4_dev *dev = &priv->dev;
struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
port_qos = &priv->mfunc.master.qos_ctl[port];
memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
if (slave > port_qos->num_of_qos_vfs) {
mlx4_info(dev, "No available VPP resources for this VF\n");
return -EINVAL;
}
/* We need to query the default QoS values from Vport 0 */
err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
if (err) {
mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
return err;
}
for (i = 0; i < MLX4_NUM_UP; i++) {
if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
vpp_qos[i].max_avg_bw = max_tx_rate;
vpp_qos[i].enable = 1;
} else {
/* If the user supplied tx_rate == 0, no rate limit
* configuration is required, so leave max_avg_bw at the
* value queried from Vport 0.
*/
vpp_qos[i].enable = 0;
}
}
err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
if (err) {
mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
return err;
}
return 0;
}
static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
struct mlx4_vport_state *vf_admin)
{
struct mlx4_qos_manager *info;
struct mlx4_priv *priv = mlx4_priv(dev);
if (!mlx4_is_master(dev) ||
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
return false;
info = &priv->mfunc.master.qos_ctl[port];
if (vf_admin->default_vlan != MLX4_VGT &&
test_bit(vf_admin->default_qos, info->priority_bm))
return true;
return false;
}
static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
struct mlx4_vport_state *vf_admin,
int vlan, int qos)
{
struct mlx4_vport_state dummy_admin = {0};
if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
!vf_admin->tx_rate)
return true;
dummy_admin.default_qos = qos;
dummy_admin.default_vlan = vlan;
/* The VF wants to move to another VST state that is valid with the
* current rate limit: either a different default vlan in VST or
* another supported QoS priority. Otherwise we don't allow this
* change while the TX rate is still configured.
*/
if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
return true;
mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
(vlan == MLX4_VGT) ? "VGT" : "VST");
if (vlan != MLX4_VGT)
mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
return false;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
if (is_multicast_ether_addr(mac))
return -EINVAL;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
if (s_info->spoofchk && is_zero_ether_addr(mac)) {
mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
return -EPERM;
}
s_info->mac = ether_addr_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
__be16 proto)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *vf_admin;
struct mlx4_slave_state *slave_state;
struct mlx4_vport_oper_state *vf_oper;
int slave;
if ((!mlx4_is_master(dev)) ||
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
return -EPROTONOSUPPORT;
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
if (proto == htons(ETH_P_8021AD) &&
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
return -EPROTONOSUPPORT;
if (proto != htons(ETH_P_8021Q) &&
proto != htons(ETH_P_8021AD))
return -EINVAL;
if ((proto == htons(ETH_P_8021AD)) &&
((vlan == 0) || (vlan == MLX4_VGT)))
return -EINVAL;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
slave_state = &priv->mfunc.master.slave_state[slave];
if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
(!slave_state->vst_qinq_supported)) {
mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
return -EPROTONOSUPPORT;
}
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
return -EPERM;
if ((0 == vlan) && (0 == qos))
vf_admin->default_vlan = MLX4_VGT;
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
vf_admin->vlan_proto = proto;
/* If a rate was configured prior to VST, we saved it in
* vf_admin->tx_rate; now, if the priority is supported, we enforce
* the QoS.
*/
if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
vf_admin->tx_rate)
vf_admin->qos_vport = slave;
/* Try to activate the new vf state without a restart;
* this option is not supported while moving to VST QinQ mode.
*/
if ((proto == htons(ETH_P_8021AD) &&
vf_oper->state.vlan_proto != proto) ||
mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
vf, port);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate)
{
int err;
int slave;
struct mlx4_vport_state *vf_admin;
struct mlx4_priv *priv = mlx4_priv(dev);
if (!mlx4_is_master(dev) ||
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
return -EPROTONOSUPPORT;
if (min_tx_rate) {
mlx4_info(dev, "Minimum BW share not supported\n");
return -EPROTONOSUPPORT;
}
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
if (err) {
mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
max_tx_rate);
return err;
}
vf_admin->tx_rate = max_tx_rate;
/* if VF is not in supported mode (VST with supported prio),
* we do not change vport configuration for its QPs, but save
* the rate, so it will be enforced when it moves to supported
* mode next time.
*/
if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
mlx4_info(dev,
"rate set for VF %d when not in valid state\n", vf);
if (vf_admin->default_vlan != MLX4_VGT)
mlx4_info(dev, "VST priority not supported by QoS\n");
else
mlx4_info(dev, "VF in VGT mode (needed VST)\n");
mlx4_info(dev,
"rate %d take affect when VF moves to valid state\n",
max_tx_rate);
return 0;
}
/* If the user sets rate to 0, assign the default vport for its QPs */
vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
if (priv->mfunc.master.slave_state[slave].active &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
/* mlx4_get_slave_default_vlan -
* return true if the slave is in VST mode (has a default vlan);
* if so, also return the vlan & qos (when not NULL)
*/
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
u16 *vlan, u8 *qos)
{
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;
priv = mlx4_priv(dev);
port = mlx4_slaves_closest_port(dev, slave, port);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (MLX4_VGT != vp_oper->state.default_vlan) {
if (vlan)
*vlan = vp_oper->state.default_vlan;
if (qos)
*qos = vp_oper->state.default_qos;
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
u8 mac[ETH_ALEN];
if ((!mlx4_is_master(dev)) ||
!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
return -EPROTONOSUPPORT;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
u64_to_ether_addr(s_info->mac, mac);
if (setting && !is_valid_ether_addr(mac)) {
mlx4_info(dev, "Illegal MAC with spoofchk\n");
return -EPERM;
}
s_info->spoofchk = setting;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
ivf->vf = vf;
/* need to convert it to a func */
ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
ivf->mac[5] = ((s_info->mac) & 0xff);
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
ivf->vlan_proto = s_info->vlan_proto;
if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
ivf->max_tx_rate = s_info->tx_rate;
else
ivf->max_tx_rate = 0;
ivf->min_tx_rate = 0;
ivf->spoofchk = s_info->spoofchk;
ivf->linkstate = s_info->link_state;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *s_info;
int slave;
u8 link_stat_event;
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
/* get current link state */
if (!priv->sense.do_sense_port[port])
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
else
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
break;
case IFLA_VF_LINK_STATE_ENABLE:
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
break;
case IFLA_VF_LINK_STATE_DISABLE:
link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
break;
default:
mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
link_state, slave, port);
return -EINVAL;
}
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->link_state = link_state;
/* send event */
mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_dbg(dev,
"updating vf %d port %d no link state HW enforcement\n",
vf, port);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
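/* Read (and optionally reset) a hardware counter via QUERY_IF_STAT
* and accumulate the basic rx/tx frame and byte counts into
* counter_stats.
*/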
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
struct mlx4_counter *counter_stats, int reset)
{
struct mlx4_cmd_mailbox *mailbox = NULL;
struct mlx4_counter *tmp_counter;
int err;
u32 if_stat_in_mod;
if (!counter_stats)
return -EINVAL;
if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
if_stat_in_mod = counter_index;
if (reset)
if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
err = mlx4_cmd_box(dev, 0, mailbox->dma,
if_stat_in_mod, 0,
MLX4_CMD_QUERY_IF_STAT,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
__func__, counter_index);
goto if_stat_out;
}
tmp_counter = (struct mlx4_counter *)mailbox->buf;
counter_stats->counter_mode = tmp_counter->counter_mode;
if (counter_stats->counter_mode == 0) {
counter_stats->rx_frames =
cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
be64_to_cpu(tmp_counter->rx_frames));
counter_stats->tx_frames =
cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
be64_to_cpu(tmp_counter->tx_frames));
counter_stats->rx_bytes =
cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
be64_to_cpu(tmp_counter->rx_bytes));
counter_stats->tx_bytes =
cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
be64_to_cpu(tmp_counter->tx_bytes));
}
if_stat_out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats)
{
struct mlx4_counter tmp_vf_stats;
int slave;
int err = 0;
if (!vf_stats)
return -EINVAL;
if (!mlx4_is_master(dev))
return -EPROTONOSUPPORT;
slave = mlx4_get_slave_indx(dev, vf_idx);
if (slave < 0)
return -EINVAL;
port = mlx4_slaves_closest_port(dev, slave, port);
err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
if (!err && tmp_vf_stats.counter_mode == 0) {
vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
}
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (slave < 1 || slave >= dev->num_slaves ||
port < 1 || port > MLX4_MAX_PORTS)
return 0;
return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (slave == mlx4_master_func_num(dev))
return 1;
if (slave < 1 || slave >= dev->num_slaves ||
port < 1 || port > MLX4_MAX_PORTS)
return 0;
return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
int enabled)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
int min_port = find_first_bit(actv_ports.ports,
priv->dev.caps.num_ports) + 1;
int max_port = min_port - 1 +
bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
if (slave == mlx4_master_func_num(dev))
return 0;
if (slave < 1 || slave >= dev->num_slaves ||
port < 1 || port > MLX4_MAX_PORTS ||
enabled < 0 || enabled > 1)
return -EINVAL;
if (min_port == max_port && dev->caps.num_ports > 1) {
mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
return -EPROTONOSUPPORT;
}
priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/cmd.c
|
/*
* Copyright (c) 2011 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/dcbnl.h>
#include <linux/math64.h>
#include "mlx4_en.h"
#include "fw_qos.h"
enum {
MLX4_CEE_STATE_DOWN = 0,
MLX4_CEE_STATE_UP = 1,
};
/* Definitions for QCN
*/
struct mlx4_congestion_control_mb_prio_802_1_qau_params {
__be32 modify_enable_high;
__be32 modify_enable_low;
__be32 reserved1;
__be32 extended_enable;
__be32 rppp_max_rps;
__be32 rpg_time_reset;
__be32 rpg_byte_reset;
__be32 rpg_threshold;
__be32 rpg_max_rate;
__be32 rpg_ai_rate;
__be32 rpg_hai_rate;
__be32 rpg_gd;
__be32 rpg_min_dec_fac;
__be32 rpg_min_rate;
__be32 max_time_rise;
__be32 max_byte_rise;
__be32 max_qdelta;
__be32 min_qoffset;
__be32 gd_coefficient;
__be32 reserved2[5];
__be32 cp_sample_base;
__be32 reserved3[39];
};
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
__be64 rppp_rp_centiseconds;
__be32 reserved1;
__be32 ignored_cnm;
__be32 rppp_created_rps;
__be32 estimated_total_rate;
__be32 max_active_rate_limiter_index;
__be32 dropped_cnms_busy_fw;
__be32 reserved2;
__be32 cnms_handled_successfully;
__be32 min_total_limiters_rate;
__be32 max_total_limiters_rate;
__be32 reserved3[4];
};
static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
switch (capid) {
case DCB_CAP_ATTR_PFC:
*cap = true;
break;
case DCB_CAP_ATTR_DCBX:
*cap = priv->dcbx_cap;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 1 << mlx4_max_tc(priv->mdev->dev);
break;
default:
*cap = false;
break;
}
return 0;
}
static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
return priv->cee_config.pfc_state;
}
static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
priv->cee_config.pfc_state = state;
}
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
u8 *setting)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
*setting = priv->cee_config.dcb_pfc[priority];
}
static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
u8 setting)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
priv->cee_config.dcb_pfc[priority] = setting;
priv->cee_config.pfc_state = true;
}
static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
return -EINVAL;
if (tcid == DCB_NUMTCS_ATTR_PFC)
*num = mlx4_max_tc(priv->mdev->dev);
else
*num = 0;
return 0;
}
static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
u8 tx_pause, tx_ppp, rx_pause, rx_ppp;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (priv->cee_config.pfc_state) {
int tc;
rx_ppp = prof->rx_ppp;
tx_ppp = prof->tx_ppp;
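/* Explanatory note (not in the original source): the loop below translates
 * the per-priority CEE PFC configuration into the per-priority pause
 * bitmasks, setting or clearing bit tc in tx_ppp/rx_ppp for each enabled
 * direction. Global pause is forced off further down whenever any
 * per-priority pause bit ends up set.
 */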
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
tx_ppp &= ~tc_mask;
rx_ppp &= ~tc_mask;
break;
case pfc_enabled_full:
tx_ppp |= tc_mask;
rx_ppp |= tc_mask;
break;
case pfc_enabled_tx:
tx_ppp |= tc_mask;
rx_ppp &= ~tc_mask;
break;
case pfc_enabled_rx:
tx_ppp &= ~tc_mask;
rx_ppp |= tc_mask;
break;
default:
break;
}
}
rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause;
tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause;
} else {
rx_ppp = 0;
tx_ppp = 0;
rx_pause = prof->rx_pause;
tx_pause = prof->tx_pause;
}
if (mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
tx_pause, tx_ppp, rx_pause, rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
return 1;
}
prof->tx_ppp = tx_ppp;
prof->rx_ppp = rx_ppp;
prof->tx_pause = tx_pause;
prof->rx_pause = rx_pause;
return 0;
}
static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
return MLX4_CEE_STATE_UP;
return MLX4_CEE_STATE_DOWN;
}
static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int num_tcs = 0;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
return 0;
if (state) {
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
num_tcs = IEEE_8021QAZ_MAX_TCS;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
if (mlx4_en_alloc_tx_queue_per_tc(dev, num_tcs))
return 1;
return 0;
}
/* On success, returns a non-zero 802.1p user priority bitmap;
* otherwise returns 0 (the invalid user priority bitmap) to
* indicate an error.
*/
static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct dcb_app app = {
.selector = idtype,
.protocol = id,
};
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;
return dcb_getapp(netdev, &app);
}
static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
u16 id, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct dcb_app app;
if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return -EINVAL;
memset(&app, 0, sizeof(struct dcb_app));
app.selector = idtype;
app.protocol = id;
app.priority = up;
return dcb_setapp(netdev, &app);
}
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
if (!my_ets)
return -EINVAL;
ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
ets->cbs = my_ets->cbs;
memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
return 0;
}
static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
{
int i;
int total_ets_bw = 0;
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] >= MLX4_EN_NUM_UP_HIGH) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
}
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
case IEEE_8021QAZ_TSA_STRICT:
break;
case IEEE_8021QAZ_TSA_ETS:
has_ets_tc = 1;
total_ets_bw += ets->tc_tx_bw[i];
break;
default:
en_err(priv, "TC[%d]: Not supported TSA: %d\n",
i, ets->tc_tsa[i]);
return -EOPNOTSUPP;
}
}
if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
total_ets_bw);
return -EINVAL;
}
return 0;
}
static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
struct ieee_ets *ets, u16 *ratelimit)
{
struct mlx4_en_dev *mdev = priv->mdev;
int num_strict = 0;
int i;
__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
__u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
ets = ets ?: &priv->ets;
ratelimit = ratelimit ?: priv->maxrate;
/* higher TC means higher priority => lower pg */
for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
pg[i] = MLX4_EN_TC_VENDOR;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
break;
case IEEE_8021QAZ_TSA_STRICT:
pg[i] = num_strict++;
tc_tx_bw[i] = MLX4_EN_BW_MAX;
break;
case IEEE_8021QAZ_TSA_ETS:
pg[i] = MLX4_EN_TC_ETS;
tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
break;
}
}
return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
ratelimit);
}
static int
mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
err = mlx4_en_ets_validate(priv, ets);
if (err)
return err;
err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
if (err)
return err;
err = mlx4_en_config_port_scheduler(priv, ets, NULL);
if (err)
return err;
memcpy(&priv->ets, ets, sizeof(priv->ets));
return 0;
}
static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
pfc->pfc_en = priv->prof->tx_ppp;
return 0;
}
static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
u32 tx_pause, tx_ppp, rx_pause, rx_ppp;
int err;
en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
pfc->pfc_cap,
pfc->pfc_en,
pfc->mbc,
pfc->delay);
rx_pause = prof->rx_pause && !pfc->pfc_en;
tx_pause = prof->tx_pause && !pfc->pfc_en;
rx_ppp = pfc->pfc_en;
tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
tx_pause, tx_ppp, rx_pause, rx_ppp);
if (err) {
en_err(priv, "Failed setting pause params\n");
return err;
}
mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
rx_ppp, rx_pause, tx_ppp, tx_pause);
prof->tx_ppp = tx_ppp;
prof->rx_ppp = rx_ppp;
prof->rx_pause = rx_pause;
prof->tx_pause = tx_pause;
return err;
}
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
if (mode == priv->dcbx_cap)
return 0;
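/* Explanatory note (not in the original source): only host-managed DCBX is
 * supported. The checks below reject firmware/LLD-managed mode, reject
 * enabling the IEEE and CEE versions at the same time, and require
 * DCB_CAP_DCBX_HOST to be set.
 */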
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
((mode & DCB_CAP_DCBX_VER_IEEE) &&
(mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
goto err;
priv->dcbx_cap = mode;
ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
if (mode & DCB_CAP_DCBX_VER_IEEE) {
if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
goto err;
if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
goto err;
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
if (mlx4_en_dcbnl_set_all(dev))
goto err;
} else {
if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
goto err;
if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
goto err;
if (mlx4_en_alloc_tx_queue_per_tc(dev, 0))
goto err;
}
return 0;
err:
return 1;
}
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
maxrate->tc_maxrate[i] =
priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
return 0;
}
static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
struct ieee_maxrate *maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 tmp[IEEE_8021QAZ_MAX_TCS];
int i, err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
/* Convert from Kbps into HW units, rounding result up.
* Setting to 0 means unlimited BW.
*/
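/* Illustrative example (values assumed, not from the original source): with
 * MLX4_RATELIMIT_UNITS_IN_KB = 100000, one HW unit is 100 Mb/s. A request
 * of 250000 Kbps becomes (250000 + 99999) / 100000 = 3 units, i.e. it is
 * rounded up to an effective 300 Mb/s, while a request of 0 stays 0
 * (unlimited).
 */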
tmp[i] = div_u64(maxrate->tc_maxrate[i] +
MLX4_RATELIMIT_UNITS_IN_KB - 1,
MLX4_RATELIMIT_UNITS_IN_KB);
}
err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
if (err)
return err;
memcpy(priv->maxrate, tmp, sizeof(priv->maxrate));
return 0;
}
#define RPG_ENABLE_BIT 31
#define CN_TAG_BIT 30
static int mlx4_en_dcbnl_ieee_getqcn(struct net_device *dev,
struct ieee_qcn *qcn)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
struct mlx4_cmd_mailbox *mailbox_out = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_out))
return -ENOMEM;
hw_qcn =
(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)
mailbox_out->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
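/* Explanatory note (not in the original source): inmod, as composed above,
 * packs the port number into bits 7:0, a one-hot mask for priority i into
 * bits 15:8, and the congestion-control algorithm selector into bits 31:16.
 */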
err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mailbox_out->dma,
inmod, MLX4_CONGESTION_CONTROL_GET_PARAMS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return err;
}
qcn->rpg_enable[i] =
be32_to_cpu(hw_qcn->extended_enable) >> RPG_ENABLE_BIT;
qcn->rppp_max_rps[i] =
be32_to_cpu(hw_qcn->rppp_max_rps);
qcn->rpg_time_reset[i] =
be32_to_cpu(hw_qcn->rpg_time_reset);
qcn->rpg_byte_reset[i] =
be32_to_cpu(hw_qcn->rpg_byte_reset);
qcn->rpg_threshold[i] =
be32_to_cpu(hw_qcn->rpg_threshold);
qcn->rpg_max_rate[i] =
be32_to_cpu(hw_qcn->rpg_max_rate);
qcn->rpg_ai_rate[i] =
be32_to_cpu(hw_qcn->rpg_ai_rate);
qcn->rpg_hai_rate[i] =
be32_to_cpu(hw_qcn->rpg_hai_rate);
qcn->rpg_gd[i] =
be32_to_cpu(hw_qcn->rpg_gd);
qcn->rpg_min_dec_fac[i] =
be32_to_cpu(hw_qcn->rpg_min_dec_fac);
qcn->rpg_min_rate[i] =
be32_to_cpu(hw_qcn->rpg_min_rate);
qcn->cndd_state_machine[i] =
priv->cndd_state[i];
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return 0;
}
static int mlx4_en_dcbnl_ieee_setqcn(struct net_device *dev,
struct ieee_qcn *qcn)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_params *hw_qcn;
struct mlx4_cmd_mailbox *mailbox_in = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
#define MODIFY_ENABLE_HIGH_MASK 0xc0000000
#define MODIFY_ENABLE_LOW_MASK 0xffc00000
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_in = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_in))
return -ENOMEM;
mailbox_in_dma = mailbox_in->dma;
hw_qcn =
(struct mlx4_congestion_control_mb_prio_802_1_qau_params *)mailbox_in->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
/* Before updating a QCN parameter,
* its modify enable bit needs to be set to 1
*/
hw_qcn->modify_enable_high = cpu_to_be32(
MODIFY_ENABLE_HIGH_MASK);
hw_qcn->modify_enable_low = cpu_to_be32(MODIFY_ENABLE_LOW_MASK);
hw_qcn->extended_enable = cpu_to_be32(qcn->rpg_enable[i] << RPG_ENABLE_BIT);
hw_qcn->rppp_max_rps = cpu_to_be32(qcn->rppp_max_rps[i]);
hw_qcn->rpg_time_reset = cpu_to_be32(qcn->rpg_time_reset[i]);
hw_qcn->rpg_byte_reset = cpu_to_be32(qcn->rpg_byte_reset[i]);
hw_qcn->rpg_threshold = cpu_to_be32(qcn->rpg_threshold[i]);
hw_qcn->rpg_max_rate = cpu_to_be32(qcn->rpg_max_rate[i]);
hw_qcn->rpg_ai_rate = cpu_to_be32(qcn->rpg_ai_rate[i]);
hw_qcn->rpg_hai_rate = cpu_to_be32(qcn->rpg_hai_rate[i]);
hw_qcn->rpg_gd = cpu_to_be32(qcn->rpg_gd[i]);
hw_qcn->rpg_min_dec_fac = cpu_to_be32(qcn->rpg_min_dec_fac[i]);
hw_qcn->rpg_min_rate = cpu_to_be32(qcn->rpg_min_rate[i]);
priv->cndd_state[i] = qcn->cndd_state_machine[i];
if (qcn->cndd_state_machine[i] == DCB_CNDD_INTERIOR_READY)
hw_qcn->extended_enable |= cpu_to_be32(1 << CN_TAG_BIT);
err = mlx4_cmd(priv->mdev->dev, mailbox_in_dma, inmod,
MLX4_CONGESTION_CONTROL_SET_PARAMS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
return err;
}
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_in);
return 0;
}
static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
struct ieee_qcn_stats *qcn_stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *hw_qcn_stats;
struct mlx4_cmd_mailbox *mailbox_out = NULL;
u64 mailbox_in_dma = 0;
u32 inmod = 0;
int i, err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QCN))
return -EOPNOTSUPP;
mailbox_out = mlx4_alloc_cmd_mailbox(priv->mdev->dev);
if (IS_ERR(mailbox_out))
return -ENOMEM;
hw_qcn_stats =
(struct mlx4_congestion_control_mb_prio_802_1_qau_statistics *)
mailbox_out->buf;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
inmod = priv->port | ((1 << i) << 8) |
(MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT << 16);
err = mlx4_cmd_box(priv->mdev->dev, mailbox_in_dma,
mailbox_out->dma, inmod,
MLX4_CONGESTION_CONTROL_GET_STATISTICS,
MLX4_CMD_CONGESTION_CTRL_OPCODE,
MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (err) {
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return err;
}
qcn_stats->rppp_rp_centiseconds[i] =
be64_to_cpu(hw_qcn_stats->rppp_rp_centiseconds);
qcn_stats->rppp_created_rps[i] =
be32_to_cpu(hw_qcn_stats->rppp_created_rps);
}
mlx4_free_cmd_mailbox(priv->mdev->dev, mailbox_out);
return 0;
}
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
.ieee_getets = mlx4_en_dcbnl_ieee_getets,
.ieee_setets = mlx4_en_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
.ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
.ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
.ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
.getstate = mlx4_en_dcbnl_get_state,
.setstate = mlx4_en_dcbnl_set_state,
.getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
.setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
.setall = mlx4_en_dcbnl_set_all,
.getcap = mlx4_en_dcbnl_getcap,
.getnumtcs = mlx4_en_dcbnl_getnumtcs,
.getpfcstate = mlx4_en_dcbnl_getpfcstate,
.setpfcstate = mlx4_en_dcbnl_setpfcstate,
.getapp = mlx4_en_dcbnl_getapp,
.setapp = mlx4_en_dcbnl_setapp,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
.setstate = mlx4_en_dcbnl_set_state,
.getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
.setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
.setall = mlx4_en_dcbnl_set_all,
.getnumtcs = mlx4_en_dcbnl_getnumtcs,
.getpfcstate = mlx4_en_dcbnl_getpfcstate,
.setpfcstate = mlx4_en_dcbnl_setpfcstate,
.getapp = mlx4_en_dcbnl_getapp,
.setapp = mlx4_en_dcbnl_setapp,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
#include "mlx4_en.h"
#include "en_port.h"
#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
XDP_PACKET_HEADROOM - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
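/* Explanatory note (not in the original source): the largest MTU usable
 * with XDP is one page minus everything else that must share that page with
 * the packet: the Ethernet header, up to two VLAN tags, the headroom XDP
 * programs may prepend, and the aligned skb_shared_info kept at the tail.
 * The exact value therefore depends on PAGE_SIZE and the kernel
 * configuration.
 */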
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int i;
unsigned int offset = 0;
if (up && up != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
netdev_set_num_tc(dev, up);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
/* Partition Tx queues evenly amongst UP's */
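/* Illustrative example (numbers assumed, not from the original source):
 * each UP gets a contiguous block of num_tx_rings_p_up queues, so with 8
 * rings per UP, TC 0 maps to queues 0-7, TC 1 to queues 8-15, and so on.
 */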
for (i = 0; i < up; i++) {
netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
offset += priv->num_tx_rings_p_up;
}
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (up) {
if (priv->dcbx_cap)
priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
priv->cee_config.pfc_state = false;
}
}
#endif /* CONFIG_MLX4_EN_DCB */
return 0;
}
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int total_count;
int port_up = 0;
int err = 0;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
MLX4_EN_NUM_UP_HIGH;
new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
new_prof.num_up;
total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
if (total_count > MAX_TX_RINGS) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
total_count, MAX_TX_RINGS);
goto out;
}
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (port_up) {
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed starting port for setup TC\n");
goto out;
}
}
err = mlx4_en_setup_tc(dev, tc);
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
return err;
}
static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct tc_mqprio_qopt *mqprio = type_data;
if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP;
if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
return -EINVAL;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
struct list_head next;
struct work_struct work;
u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
__be16 dst_port;
int rxq_index;
struct mlx4_en_priv *priv;
u32 flow_id; /* RFS infrastructure id */
int id; /* mlx4_en driver id */
u64 reg_id; /* Flow steering API id */
u8 activated; /* Used to prevent expiry before filter
* is attached
*/
struct hlist_node filter_chain;
};
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
switch (ip_proto) {
case IPPROTO_UDP:
return MLX4_NET_TRANS_RULE_ID_UDP;
case IPPROTO_TCP:
return MLX4_NET_TRANS_RULE_ID_TCP;
default:
return MLX4_NET_TRANS_RULE_NUM;
}
};
/* Must not acquire state_lock, as its corresponding work_sync
* is done under it.
*/
static void mlx4_en_filter_work(struct work_struct *work)
{
struct mlx4_en_filter *filter = container_of(work,
struct mlx4_en_filter,
work);
struct mlx4_en_priv *priv = filter->priv;
struct mlx4_spec_list spec_tcp_udp = {
.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
{
.tcp_udp = {
.dst_port = filter->dst_port,
.dst_port_msk = (__force __be16)-1,
.src_port = filter->src_port,
.src_port_msk = (__force __be16)-1,
},
},
};
struct mlx4_spec_list spec_ip = {
.id = MLX4_NET_TRANS_RULE_ID_IPV4,
{
.ipv4 = {
.dst_ip = filter->dst_ip,
.dst_ip_msk = (__force __be32)-1,
.src_ip = filter->src_ip,
.src_ip_msk = (__force __be32)-1,
},
},
};
struct mlx4_spec_list spec_eth = {
.id = MLX4_NET_TRANS_RULE_ID_ETH,
};
struct mlx4_net_trans_rule rule = {
.list = LIST_HEAD_INIT(rule.list),
.queue_mode = MLX4_NET_TRANS_Q_LIFO,
.exclusive = 1,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
.port = priv->port,
.priority = MLX4_DOMAIN_RFS,
};
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
filter->ip_proto);
goto ignore;
}
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp_udp.list, &rule.list);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
if (filter->reg_id) {
rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
if (rc && rc != -ENOENT)
en_err(priv, "Error detaching flow. rc = %d\n", rc);
}
rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
if (rc)
en_err(priv, "Error attaching flow. err = %d\n", rc);
ignore:
mlx4_en_filter_rfs_expire(priv);
filter->activated = 1;
}
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
__be16 src_port, __be16 dst_port)
{
unsigned long l;
int bucket_idx;
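/* Explanatory note (not in the original source): the 4-tuple is folded into
 * a single word: both ports in the low bits (dst_port shifted by 2), XORed
 * with the source and destination IPs, then reduced by hash_long() to
 * MLX4_EN_FILTER_HASH_SHIFT bits to pick a bucket.
 */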
l = (__force unsigned long)src_port |
((__force unsigned long)dst_port << 2);
l ^= (__force unsigned long)(src_ip ^ dst_ip);
bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
__be32 dst_ip, u8 ip_proto, __be16 src_port,
__be16 dst_port, u32 flow_id)
{
struct mlx4_en_filter *filter;
filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
if (!filter)
return NULL;
filter->priv = priv;
filter->rxq_index = rxq_index;
INIT_WORK(&filter->work, mlx4_en_filter_work);
filter->src_ip = src_ip;
filter->dst_ip = dst_ip;
filter->ip_proto = ip_proto;
filter->src_port = src_port;
filter->dst_port = dst_port;
filter->flow_id = flow_id;
filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
list_add_tail(&filter->next, &priv->filters);
hlist_add_head(&filter->filter_chain,
filter_hash_bucket(priv, src_ip, dst_ip, src_port,
dst_port));
return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
struct mlx4_en_priv *priv = filter->priv;
int rc;
list_del(&filter->next);
rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
if (rc && rc != -ENOENT)
en_err(priv, "Error detaching flow. rc = %d\n", rc);
kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
u8 ip_proto, __be16 src_port, __be16 dst_port)
{
struct mlx4_en_filter *filter;
struct mlx4_en_filter *ret = NULL;
hlist_for_each_entry(filter,
filter_hash_bucket(priv, src_ip, dst_ip,
src_port, dst_port),
filter_chain) {
if (filter->src_ip == src_ip &&
filter->dst_ip == dst_ip &&
filter->ip_proto == ip_proto &&
filter->src_port == src_port &&
filter->dst_port == dst_port) {
ret = filter;
break;
}
}
return ret;
}
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx4_en_priv *priv = netdev_priv(net_dev);
struct mlx4_en_filter *filter;
const struct iphdr *ip;
const __be16 *ports;
u8 ip_proto;
__be32 src_ip;
__be32 dst_ip;
__be16 src_port;
__be16 dst_port;
int nhoff = skb_network_offset(skb);
int ret = 0;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
ip = (const struct iphdr *)(skb->data + nhoff);
if (ip_is_fragment(ip))
return -EPROTONOSUPPORT;
if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
return -EPROTONOSUPPORT;
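/* Explanatory note (not in the original source): both the TCP and UDP
 * headers begin with the 16-bit source and destination ports, so they can
 * be read directly right after the IP header (at nhoff + 4 * ihl) without
 * parsing the L4 header any further.
 */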
ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
ip_proto = ip->protocol;
src_ip = ip->saddr;
dst_ip = ip->daddr;
src_port = ports[0];
dst_port = ports[1];
spin_lock_bh(&priv->filters_lock);
filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
src_port, dst_port);
if (filter) {
if (filter->rxq_index == rxq_index)
goto out;
filter->rxq_index = rxq_index;
} else {
filter = mlx4_en_filter_alloc(priv, rxq_index,
src_ip, dst_ip, ip_proto,
src_port, dst_port, flow_id);
if (!filter) {
ret = -ENOMEM;
goto err;
}
}
queue_work(priv->mdev->workqueue, &filter->work);
out:
ret = filter->id;
err:
spin_unlock_bh(&priv->filters_lock);
return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter, *tmp;
LIST_HEAD(del_list);
spin_lock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
list_move(&filter->next, &del_list);
hlist_del(&filter->filter_chain);
}
spin_unlock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &del_list, next) {
cancel_work_sync(&filter->work);
mlx4_en_filter_free(filter);
}
}
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
LIST_HEAD(del_list);
int i = 0;
spin_lock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
break;
if (filter->activated &&
!work_pending(&filter->work) &&
rps_may_expire_flow(priv->dev,
filter->rxq_index, filter->flow_id,
filter->id)) {
list_move(&filter->next, &del_list);
hlist_del(&filter->filter_chain);
} else
last_filter = filter;
i++;
}
if (last_filter && (&last_filter->next != priv->filters.next))
list_move(&priv->filters, &last_filter->next);
spin_unlock_bh(&priv->filters_lock);
list_for_each_entry_safe(filter, tmp, &del_list, next)
mlx4_en_filter_free(filter);
}
#endif
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int idx;
en_dbg(HW, priv, "adding VLAN:%d\n", vid);
set_bit(vid, priv->active_vlans);
/* Add VID to port VLAN filter */
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
goto out;
}
}
err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
if (err)
en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
clear_bit(vid, priv->active_vlans);
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
mlx4_unregister_vlan(mdev->dev, priv->port, vid);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
return err;
}
static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac)
{
u8 addr[ETH_ALEN];
u64_to_ether_addr(src_mac, addr);
eth_hw_addr_set(dev, addr);
}
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
const unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
MLX4_DOMAIN_NIC, reg_id);
if (err) {
en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
return err;
}
en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
const unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = *qpn;
memcpy(&gid[10], mac, ETH_ALEN);
gid[5] = priv->port;
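/* Explanatory note (not in the original source): the 16-byte GID built
 * above follows the layout used for Ethernet steering in B0 mode, with the
 * MAC occupying bytes 10-15 and the port number placed in byte 5.
 */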
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec_eth = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};
rule.port = priv->port;
rule.qpn = *qpn;
INIT_LIST_HEAD(&rule.list);
spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
list_add_tail(&spec_eth.list, &rule.list);
err = mlx4_flow_attach(dev, &rule, reg_id);
break;
}
default:
return -EINVAL;
}
if (err)
en_warn(priv, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
const unsigned char *mac,
int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = qpn;
memcpy(&gid[10], mac, ETH_ALEN);
gid[5] = priv->port;
mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
mlx4_flow_detach(dev, reg_id);
break;
}
default:
en_err(priv, "Invalid steering mode.\n");
}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
index = mlx4_register_mac(dev, priv->port, mac);
if (index < 0) {
err = index;
en_err(priv, "Failed adding MAC: %pM\n",
priv->dev->dev_addr);
return err;
}
en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
int base_qpn = mlx4_get_base_qpn(dev, priv->port);
*qpn = base_qpn + index;
return 0;
}
err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
MLX4_RES_USAGE_DRIVER);
en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
if (err) {
en_err(priv, "Failed to reserve qp for mac registration\n");
mlx4_unregister_mac(dev, priv->port, mac);
return err;
}
return 0;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
} else {
en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
priv->port, qpn);
mlx4_qp_release_range(dev, qpn, 1);
priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
}
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
unsigned char *new_mac, unsigned char *prev_mac)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
u64 new_mac_u64 = ether_addr_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
u64 prev_mac_u64 = ether_addr_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
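/* Explanatory note (not in the original source): for the entry matching
 * prev_mac, the loop below releases its unicast steering rule and HW MAC
 * registration, rehashes the entry under the new MAC, then registers the
 * new MAC and re-attaches unicast (and, if present, tunnel) steering rules
 * pointing at the same QP.
 */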
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(dev, priv->port,
prev_mac_u64);
hlist_del_rcu(&entry->hlist);
synchronize_rcu();
memcpy(entry->mac, new_mac, ETH_ALEN);
entry->reg_id = 0;
mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
hlist_add_head_rcu(&entry->hlist,
&priv->mac_hash[mac_hash]);
mlx4_register_mac(dev, priv->port, new_mac_u64);
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
if (err)
return err;
if (priv->tunnel_reg_id) {
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
priv->tunnel_reg_id = 0;
}
err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
&priv->tunnel_reg_id);
return err;
}
}
return -EINVAL;
}
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
unsigned char new_mac[ETH_ALEN + 2])
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
return;
err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
if (err)
en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
new_mac, priv->port, err);
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
unsigned char new_mac[ETH_ALEN + 2])
{
int err = 0;
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_en_replace_mac(priv, priv->base_qpn,
new_mac, priv->current_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
if (!err)
memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct sockaddr *saddr = addr;
unsigned char new_mac[ETH_ALEN + 2];
int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
mutex_lock(&mdev->state_lock);
memcpy(new_mac, saddr->sa_data, ETH_ALEN);
err = mlx4_en_do_set_mac(priv, new_mac);
if (err)
goto out;
eth_hw_addr_set(dev, saddr->sa_data);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_mc_list *tmp, *mc_to_del;
list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
list_del(&mc_to_del->list);
kfree(mc_to_del);
}
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct mlx4_en_mc_list *tmp;
mlx4_en_clear_list(dev);
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
mlx4_en_clear_list(dev);
return;
}
memcpy(tmp->addr, ha->addr, ETH_ALEN);
list_add_tail(&tmp->list, &priv->mc_list);
}
}
static void update_mclist_flags(struct mlx4_en_priv *priv,
struct list_head *dst,
struct list_head *src)
{
struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
bool found;
/* Find all the entries that should be removed from dst;
* these are the entries that are not found in src.
*/
list_for_each_entry(dst_tmp, dst, list) {
found = false;
list_for_each_entry(src_tmp, src, list) {
if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
found = true;
break;
}
}
if (!found)
dst_tmp->action = MCLIST_REM;
}
/* Add entries that exist in src but not in dst,
* marking them as needing to be added.
*/
list_for_each_entry(src_tmp, src, list) {
found = false;
list_for_each_entry(dst_tmp, dst, list) {
if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
dst_tmp->action = MCLIST_NONE;
found = true;
break;
}
}
if (!found) {
new_mc = kmemdup(src_tmp,
sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
if (!new_mc)
return;
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev)
{
int err = 0;
if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
en_warn(priv, "Entering promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed enabling promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_unicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed enabling unicast promiscuous mode\n");
/* Add the default qp number as multicast
* promisc
*/
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
err = mlx4_multicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed enabling multicast promiscuous mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
priv->base_qpn,
1);
if (err)
en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev)
{
int err = 0;
if (netif_msg_rx_status(priv))
en_warn(priv, "Leaving promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_ALL_DEFAULT);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_unicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed disabling unicast promiscuous mode\n");
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
err = mlx4_multicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
priv->base_qpn, 0);
if (err)
en_err(priv, "Failed disabling promiscuous mode\n");
break;
}
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
struct net_device *dev,
struct mlx4_en_dev *mdev)
{
struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_add(mdev->dev,
priv->port,
priv->base_qpn,
MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_multicast_promisc_add(mdev->dev,
priv->base_qpn,
priv->port);
break;
case MLX4_STEERING_MODE_A0:
break;
}
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
err = mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_MC_DEFAULT);
break;
case MLX4_STEERING_MODE_B0:
err = mlx4_multicast_promisc_remove(mdev->dev,
priv->base_qpn,
priv->port);
break;
case MLX4_STEERING_MODE_A0:
break;
}
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE);
if (err)
en_err(priv, "Failed disabling multicast filter\n");
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
/* Update multicast list - we cache all addresses so they won't
* change while HW is updated holding the command semaphore */
netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
mcast_addr = ether_addr_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
if (mclist->action == MCLIST_REM) {
/* detach this address and delete from list */
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
err = mlx4_multicast_detach(mdev->dev,
priv->rss_map.indir_qp,
mc_list,
MLX4_PROT_ETH,
mclist->reg_id);
if (err)
en_err(priv, "Fail to detach multicast address\n");
if (mclist->tunnel_reg_id) {
err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
if (err)
en_err(priv, "Failed to detach multicast address\n");
}
/* remove from list */
list_del(&mclist->list);
kfree(mclist);
} else if (mclist->action == MCLIST_ADD) {
/* attach the address */
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
/* needed for B0 steering support */
mc_list[5] = priv->port;
err = mlx4_multicast_attach(mdev->dev,
priv->rss_map.indir_qp,
mc_list,
priv->port, 0,
MLX4_PROT_ETH,
&mclist->reg_id);
if (err)
en_err(priv, "Fail to attach multicast address\n");
err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
&mclist->tunnel_reg_id);
if (err)
en_err(priv, "Failed to attach multicast address\n");
}
}
}
}
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
struct net_device *dev,
struct mlx4_en_dev *mdev)
{
struct netdev_hw_addr *ha;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
bool found;
u64 mac;
int err = 0;
struct hlist_head *bucket;
unsigned int i;
int removed = 0;
u32 prev_flags;
/* Note that we do not need to protect our mac_hash traversal with rcu,
* since all modification code is protected by mdev->state_lock
*/
/* find what to remove */
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
found = false;
netdev_for_each_uc_addr(ha, dev) {
if (ether_addr_equal_64bits(entry->mac,
ha->addr)) {
found = true;
break;
}
}
/* The port's own MAC address is kept even if it is not in the uc list */
if (ether_addr_equal_64bits(entry->mac,
priv->current_mac))
found = true;
if (!found) {
mac = ether_addr_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
mlx4_unregister_mac(mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
entry->mac, priv->port);
++removed;
}
}
}
/* if we didn't remove anything, there is no use in trying to add
* again once we are in a forced promisc mode state
*/
if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
return;
prev_flags = priv->flags;
priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
/* find what to add */
netdev_for_each_uc_addr(ha, dev) {
found = false;
bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
found = true;
break;
}
}
if (!found) {
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
ha->addr, priv->port);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
mac = ether_addr_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
ha->addr, priv->port, err);
kfree(entry);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
err = mlx4_en_uc_steer_add(priv, ha->addr,
&priv->base_qpn,
&entry->reg_id);
if (err) {
en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
ha->addr, priv->port, err);
mlx4_unregister_mac(mdev->dev, priv->port, mac);
kfree(entry);
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
} else {
unsigned int mac_hash;
en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
ha->addr, priv->port);
mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
hlist_add_head_rcu(&entry->hlist, bucket);
}
}
}
if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
en_warn(priv, "Forcing promiscuous mode on port:%d\n",
priv->port);
} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
priv->port);
}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
rx_mode_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
goto out;
}
if (!priv->port_up) {
en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
goto out;
}
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
}
}
if (dev->priv_flags & IFF_UNICAST_FLT)
mlx4_en_do_uc_filter(priv, dev, mdev);
/* Promiscuous mode: disable all filters */
if ((dev->flags & IFF_PROMISC) ||
(priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
mlx4_en_set_promisc_mode(priv, mdev);
goto out;
}
/* Not in promiscuous mode */
if (priv->flags & MLX4_EN_FLAG_PROMISC)
mlx4_en_clear_promisc_mode(priv, mdev);
mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 reg_id;
int err = 0;
int *qpn = &priv->base_qpn;
struct mlx4_mac_entry *entry;
err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
if (err)
return err;
err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
&priv->tunnel_reg_id);
if (err)
goto tunnel_err;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto alloc_err;
}
memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
entry->reg_id = reg_id;
hlist_add_head_rcu(&entry->hlist,
&priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
return 0;
alloc_err:
if (priv->tunnel_reg_id)
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
tunnel_err:
mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
return err;
}
static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 mac;
unsigned int i;
int qpn = priv->base_qpn;
struct hlist_head *bucket;
struct hlist_node *tmp;
struct mlx4_mac_entry *entry;
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
mac = ether_addr_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
hlist_del_rcu(&entry->hlist);
kfree_rcu(entry, rcu);
}
}
if (priv->tunnel_reg_id) {
mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
priv->tunnel_reg_id = 0;
}
}
static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][txqueue];
if (netif_msg_timer(priv))
en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
txqueue, tx_ring->qpn, tx_ring->sp_cqn,
tx_ring->cons, tx_ring->prod);
priv->port_stats.tx_timeout++;
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
en_dbg(DRV, priv, "Scheduling port restart\n");
queue_work(mdev->workqueue, &priv->restart_task);
}
}
static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
struct mlx4_en_cq *cq;
int i, t;
/* If we haven't received a specific coalescing setting
* (module param), we set the moderation parameters as follows:
* - moder_cnt is set to the number of mtu sized packets to
* satisfy our coalescing target.
* - moder_time is set to a fixed value.
*/
priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
cq->moder_cnt = priv->rx_frames;
cq->moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
priv->last_moder_packets[i] = 0;
priv->last_moder_bytes[i] = 0;
}
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
cq = priv->tx_cq[t][i];
cq->moder_cnt = priv->tx_frames;
cq->moder_time = priv->tx_usecs;
}
}
/* Reset auto-moderation params */
priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
priv->adaptive_rx_coal = 1;
priv->last_moder_jiffies = 0;
priv->last_moder_tx_packets = 0;
}
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
u32 pkt_rate_high, pkt_rate_low;
struct mlx4_en_cq *cq;
unsigned long packets;
unsigned long rate;
unsigned long avg_pkt_size;
unsigned long rx_packets;
unsigned long rx_bytes;
unsigned long rx_pkt_diff;
int moder_time;
int ring, err;
if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
return;
pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
pkt_rate_high = READ_ONCE(priv->pkt_rate_high);
for (ring = 0; ring < priv->rx_ring_num; ring++) {
rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);
rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
packets = rx_pkt_diff;
rate = packets * HZ / period;
avg_pkt_size = packets ? (rx_bytes -
priv->last_moder_bytes[ring]) / packets : 0;
/* Apply auto-moderation only when packet rate
* exceeds a rate at which it matters */
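/* Explanatory note (not in the original source): the moderation time is
 * then chosen from the configured band: rates at or below pkt_rate_low map
 * to rx_usecs_low, rates at or above pkt_rate_high map to rx_usecs_high,
 * and rates in between are linearly interpolated; low rates or small
 * packets simply fall back to rx_usecs_low.
 */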
if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
if (rate <= pkt_rate_low)
moder_time = priv->rx_usecs_low;
else if (rate >= pkt_rate_high)
moder_time = priv->rx_usecs_high;
else
moder_time = (rate - pkt_rate_low) *
(priv->rx_usecs_high - priv->rx_usecs_low) /
(pkt_rate_high - pkt_rate_low) +
priv->rx_usecs_low;
} else {
moder_time = priv->rx_usecs_low;
}
cq = priv->rx_cq[ring];
if (moder_time != priv->last_moder_time[ring] ||
cq->moder_cnt != priv->rx_frames) {
priv->last_moder_time[ring] = moder_time;
cq->moder_time = moder_time;
cq->moder_cnt = priv->rx_frames;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
en_err(priv, "Failed modifying moderation for cq:%d\n",
ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
}
priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
stats_task);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
if (priv->port_up) {
err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
if (err)
en_dbg(HW, priv, "Could not update stats\n");
mlx4_en_auto_moderation(priv);
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
}
if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
mlx4_en_do_set_mac(priv, priv->current_mac);
mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
}
mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service task for tasks that need to be done
* periodically
*/
static void mlx4_en_service_task(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
service_task);
struct mlx4_en_dev *mdev = priv->mdev;
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_ptp_overflow_check(mdev);
mlx4_en_recover_from_oom(priv);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
}
mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_state *port_state = &priv->port_state;
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
bool up;
if (mlx4_en_QUERY_PORT(mdev, priv->port))
port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
if (up == netif_carrier_ok(dev))
netif_carrier_event(dev);
if (!up) {
en_info(priv, "Link Down\n");
netif_carrier_off(dev);
} else {
en_info(priv, "Link Up\n");
netif_carrier_on(dev);
}
}
static void mlx4_en_linkstate_work(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
mutex_lock(&mdev->state_lock);
mlx4_en_linkstate(priv);
mutex_unlock(&mdev->state_lock);
}
static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
int numa_node = priv->mdev->dev->numa_node;
if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
ring->affinity_mask);
return 0;
}
static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
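/* Pair an XDP TX ring with the RX ring of the same index so that completed
* XDP_TX descriptors hand their pages back to that RX ring instead of
* freeing them.
*/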
static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
int tx_ring_idx)
{
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
int rr_index = tx_ring_idx;
tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
tx_ring->recycle_ring = priv->rx_ring[rr_index];
en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
TX_XDP, tx_ring_idx, rr_index);
}
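/* Bring the port up: activate RX rings and CQs, acquire the ethernet QP,
* configure RSS steering and the drop QP, activate TX rings and CQs (both
* regular and XDP), push the port configuration to firmware (MTU,
* pause/PFC, VXLAN), run INIT_PORT and attach the broadcast address.
* The error labels below unwind these steps in reverse order.
*/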
int mlx4_en_start_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq;
struct mlx4_en_tx_ring *tx_ring;
int rx_index = 0;
int err = 0;
int i, t;
int j;
u8 mc_list[16] = {0};
if (priv->port_up) {
en_dbg(DRV, priv, "start port called while port already up\n");
return 0;
}
INIT_LIST_HEAD(&priv->mc_list);
INIT_LIST_HEAD(&priv->curr_list);
INIT_LIST_HEAD(&priv->ethtool_list);
memset(&priv->ethtool_rules[0], 0,
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
/* Configure rx cq's and rings */
err = mlx4_en_activate_rx_rings(priv);
if (err) {
en_err(priv, "Failed to activate RX rings\n");
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
err = mlx4_en_init_affinity_hint(priv, i);
if (err) {
en_err(priv, "Failed preparing IRQ affinity hint\n");
goto cq_err;
}
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++) {
struct mlx4_cqe *cqe = NULL;
cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
priv->cqe_factor;
cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
priv->rx_ring[i]->cqn = cq->mcq.cqn;
++rx_index;
}
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
priv->counter_index =
mlx4_get_default_counter_index(mdev->dev, priv->port);
err = mlx4_en_config_rss_steer(priv);
if (err) {
en_err(priv, "Failed configuring rss steering\n");
goto mac_err;
}
err = mlx4_en_create_drop_qp(priv);
if (err)
goto rss_err;
/* Configure tx cq's and rings */
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
u8 num_tx_rings_p_up = t == TX ?
priv->num_tx_rings_p_up : priv->tx_ring_num[t];
for (i = 0; i < priv->tx_ring_num[t]; i++) {
/* Configure cq */
cq = priv->tx_cq[t][i];
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed allocating Tx CQ\n");
goto tx_err;
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
en_dbg(DRV, priv,
"Resetting index of collapsed CQ:%d to -1\n", i);
cq->buf->wqe_index = cpu_to_be16(0xffff);
/* Configure ring */
tx_ring = priv->tx_ring[t][i];
err = mlx4_en_activate_tx_ring(priv, tx_ring,
cq->mcq.cqn,
i / num_tx_rings_p_up);
if (err) {
en_err(priv, "Failed allocating Tx ring\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
if (t != TX_XDP) {
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
tx_ring->recycle_ring = NULL;
/* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq);
} else {
mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
mlx4_en_init_recycle_ring(priv, i);
/* XDP TX CQ should never be armed */
}
/* Set initial ownership of all Tx TXBBs to SW (1) */
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
}
}
/* Configure port */
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
priv->prof->tx_pause,
priv->prof->tx_ppp,
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
priv->port, err);
goto tx_err;
}
err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
if (err) {
en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
dev->mtu, priv->port, err);
goto tx_err;
}
/* Set default qp number */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
if (err) {
en_err(priv, "Failed setting default qp numbers\n");
goto tx_err;
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
goto tx_err;
}
}
/* Init port */
en_dbg(HW, priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
goto tx_err;
}
/* Set Unicast and VXLAN steering rules */
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
mlx4_en_set_rss_steer_rules(priv))
mlx4_warn(mdev, "Failed setting steering rules\n");
/* Attach rx QP to broadcast address */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
priv->port, 0, MLX4_PROT_ETH,
&priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
udp_tunnel_nic_reset_ntf(dev);
priv->port_up = true;
/* Process any existing completions to prevent
* the queues from freezing when they are full
*/
for (i = 0; i < priv->rx_ring_num; i++) {
local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
local_bh_enable();
}
clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
return 0;
tx_err:
if (t == MLX4_EN_NUM_TX_TYPES) {
t--;
i = priv->tx_ring_num[t];
}
while (t >= 0) {
while (i--) {
mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
}
if (!t--)
break;
i = priv->tx_ring_num[t];
}
mlx4_en_destroy_drop_qp(priv);
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_en_put_qp(priv);
cq_err:
while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
mlx4_en_free_affinity_hint(priv, rx_index);
}
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
return err; /* need to close devices */
}
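/* Tear down everything set up by mlx4_en_start_port: close the port, stop
* the TX queues, fold the software stats, remove promiscuous and multicast
* steering state, deactivate TX and RX rings/CQs and release the RSS QPs.
* With 'detach' set the netdev is also detached (used by the restart and
* MTU-change paths).
*/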
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
struct ethtool_flow_id *flow, *tmp_flow;
int i, t;
u8 mc_list[16] = {0};
if (!priv->port_up) {
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
/* close port */
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
if (detach)
netif_device_detach(dev);
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
netif_tx_disable(dev);
spin_lock_bh(&priv->stats_lock);
mlx4_en_fold_software_stats(dev);
/* Set port as not active */
priv->port_up = false;
spin_unlock_bh(&priv->stats_lock);
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
/* Promiscuous mode */
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
MLX4_EN_FLAG_MC_PROMISC);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_ALL_DEFAULT);
mlx4_flow_steer_promisc_remove(mdev->dev,
priv->port,
MLX4_FS_MC_DEFAULT);
} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
}
/* Detach All multicasts */
eth_broadcast_addr(&mc_list[10]);
mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
MLX4_PROT_ETH, priv->broadcast_id);
list_for_each_entry(mclist, &priv->curr_list, list) {
memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
if (mclist->tunnel_reg_id)
mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
list_del(&mclist->list);
kfree(mclist);
}
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
/* Remove flow steering rules for the port */
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
ASSERT_RTNL();
list_for_each_entry_safe(flow, tmp_flow,
&priv->ethtool_list, list) {
mlx4_flow_detach(mdev->dev, flow->id);
list_del(&flow->list);
}
}
mlx4_en_destroy_drop_qp(priv);
/* Free TX Rings */
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
}
}
msleep(10);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
for (i = 0; i < priv->tx_ring_num[t]; i++)
mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
mlx4_en_delete_rss_steer_rules(priv);
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
mlx4_en_put_qp(priv);
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1;
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
struct mlx4_en_cq *cq = priv->rx_cq[i];
napi_synchronize(&cq->napi);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
mlx4_en_free_affinity_hint(priv, i);
}
}
static void mlx4_en_restart(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
restart_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
rtnl_lock();
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
rtnl_unlock();
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_tx_ring **tx_ring;
int i;
if (!mlx4_is_slave(mdev->dev))
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
memset(&priv->rx_priority_flowstats, 0,
sizeof(priv->rx_priority_flowstats));
memset(&priv->tx_priority_flowstats, 0,
sizeof(priv->tx_priority_flowstats));
memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
tx_ring = priv->tx_ring[TX];
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
tx_ring[i]->bytes = 0;
tx_ring[i]->packets = 0;
tx_ring[i]->tx_csum = 0;
tx_ring[i]->tx_dropped = 0;
tx_ring[i]->queue_stopped = 0;
tx_ring[i]->wake_queue = 0;
tx_ring[i]->tso_packets = 0;
tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
priv->rx_ring[i]->packets = 0;
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
}
}
static int mlx4_en_open(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
en_err(priv, "Cannot open - device down/disabled\n");
err = -EBUSY;
goto out;
}
/* Reset HW statistics and SW counters */
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed starting port:%d\n", priv->port);
goto out;
}
mlx4_en_linkstate(priv);
out:
mutex_unlock(&mdev->state_lock);
return err;
}
static int mlx4_en_close(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(IFDOWN, priv, "Close port called\n");
mutex_lock(&mdev->state_lock);
mlx4_en_stop_port(dev, 0);
netif_carrier_off(dev);
mutex_unlock(&mdev->state_lock);
return 0;
}
static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i, t;
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = NULL;
#endif
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
if (priv->tx_ring[t] && priv->tx_ring[t][i])
mlx4_en_destroy_tx_ring(priv,
&priv->tx_ring[t][i]);
if (priv->tx_cq[t] && priv->tx_cq[t][i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
}
kfree(priv->tx_ring[t]);
kfree(priv->tx_cq[t]);
}
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
}
static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i, t;
int node;
/* Create tx Rings */
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
prof->tx_ring_size, i, t, node))
goto err;
if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
prof->tx_ring_size,
TXBB_SIZE, node, i))
goto err;
}
}
/* Create rx Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
node = cpu_to_node(i % num_online_cpus());
if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
prof->rx_ring_size, i, RX, node))
goto err;
if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size, priv->stride,
node, i))
goto err;
}
#ifdef CONFIG_RFS_ACCEL
priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
err:
en_err(priv, "Failed to allocate NIC resources\n");
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i])
mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
prof->rx_ring_size,
priv->stride);
if (priv->rx_cq[i])
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
for (i = 0; i < priv->tx_ring_num[t]; i++) {
if (priv->tx_ring[t][i])
mlx4_en_destroy_tx_ring(priv,
&priv->tx_ring[t][i]);
if (priv->tx_cq[t][i])
mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
}
}
return -ENOMEM;
}
static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src,
struct mlx4_en_port_profile *prof)
{
int t;
memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
sizeof(dst->hwtstamp_config));
dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
dst->rx_ring_num = prof->rx_ring_num;
dst->flags = prof->flags;
dst->mdev = src->mdev;
dst->port = src->port;
dst->dev = src->dev;
dst->prof = prof;
dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
dst->tx_ring_num[t] = prof->tx_ring_num[t];
if (!dst->tx_ring_num[t])
continue;
dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_tx_ring *),
GFP_KERNEL);
if (!dst->tx_ring[t])
goto err_free_tx;
dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_cq *),
GFP_KERNEL);
if (!dst->tx_cq[t]) {
kfree(dst->tx_ring[t]);
goto err_free_tx;
}
}
return 0;
err_free_tx:
while (t--) {
kfree(dst->tx_ring[t]);
kfree(dst->tx_cq[t]);
}
return -ENOMEM;
}
static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
struct mlx4_en_priv *src)
{
int t;
memcpy(dst->rx_ring, src->rx_ring,
sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
memcpy(dst->rx_cq, src->rx_cq,
sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
sizeof(dst->hwtstamp_config));
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
dst->tx_ring_num[t] = src->tx_ring_num[t];
dst->tx_ring[t] = src->tx_ring[t];
dst->tx_cq[t] = src->tx_cq[t];
}
dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
dst->rx_ring_num = src->rx_ring_num;
memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
}
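/* Allocate a complete new set of rings and CQs into 'tmp' according to
* 'prof' without touching the live configuration.  On success the caller
* swaps the new resources in with mlx4_en_safe_replace_resources(); when
* carry_xdp_prog is set, the currently attached XDP program is carried over
* to the new RX rings.
*/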
int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp,
struct mlx4_en_port_profile *prof,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
int i, t, ret;
ret = mlx4_en_copy_priv(tmp, priv, prof);
if (ret) {
en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
__func__);
return ret;
}
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
"%s: Resource allocation failed, using previous configuration\n",
__func__);
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
kfree(tmp->tx_ring[t]);
kfree(tmp->tx_cq[t]);
}
return -ENOMEM;
}
/* All rx_rings have the same xdp_prog. Pick the first one. */
xdp_prog = rcu_dereference_protected(
priv->rx_ring[0]->xdp_prog,
lockdep_is_held(&priv->mdev->state_lock));
if (xdp_prog && carry_xdp_prog) {
bpf_prog_add(xdp_prog, tmp->rx_ring_num);
for (i = 0; i < tmp->rx_ring_num; i++)
rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
xdp_prog);
}
return 0;
}
void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
struct mlx4_en_priv *tmp)
{
mlx4_en_free_resources(priv);
mlx4_en_update_priv(priv, tmp);
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
if (priv->registered)
unregister_netdev(dev);
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
cancel_delayed_work(&priv->service_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_remove_timestamp(mdev);
/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
mdev->upper[priv->port] = NULL;
#ifdef CONFIG_RFS_ACCEL
mlx4_en_cleanup_filters(priv);
#endif
mlx4_en_free_resources(priv);
mutex_unlock(&mdev->state_lock);
free_netdev(dev);
}
static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (mtu > MLX4_EN_MAX_XDP_MTU) {
en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
mtu, MLX4_EN_MAX_XDP_MTU);
return false;
}
return true;
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
dev->mtu, new_mtu);
if (priv->tx_ring_num[TX_XDP] &&
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
dev->mtu = new_mtu;
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
/* NIC is probably restarting - let the restart task reset
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
mlx4_en_stop_port(dev, 1);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
priv->port);
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
&priv->state))
queue_work(mdev->workqueue, &priv->restart_task);
}
}
mutex_unlock(&mdev->state_lock);
}
return 0;
}
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct hwtstamp_config config;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
/* TX HW timestamp */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
if (mlx4_en_reset_config(dev, config, dev->features)) {
config.tx_type = HWTSTAMP_TX_OFF;
config.rx_filter = HWTSTAMP_FILTER_NONE;
}
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}
static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
return mlx4_en_hwtstamp_set(dev, ifr);
case SIOCGHWTSTAMP:
return mlx4_en_hwtstamp_get(dev, ifr);
default:
return -EOPNOTSUPP;
}
}
static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *en_priv = netdev_priv(netdev);
struct mlx4_en_dev *mdev = en_priv->mdev;
/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
* enable/disable, make sure the S-TAG flag is always in the same state
* as C-TAG.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX &&
!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
features |= NETIF_F_HW_VLAN_STAG_RX;
else
features &= ~NETIF_F_HW_VLAN_STAG_RX;
return features;
}
static int mlx4_en_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
bool reset = false;
int ret = 0;
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
en_info(priv, "Turn %s RX-FCS\n",
(features & NETIF_F_RXFCS) ? "ON" : "OFF");
reset = true;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
en_info(priv, "Turn %s RX-ALL\n",
ignore_fcs_value ? "ON" : "OFF");
ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
priv->port, ignore_fcs_value);
if (ret)
return ret;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
en_info(priv, "Turn %s RX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
reset = true;
}
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
en_info(priv, "Turn %s TX vlan strip offload\n",
(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
en_info(priv, "Turn %s TX S-VLAN strip offload\n",
(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
en_info(priv, "Turn %s loopback\n",
(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
mlx4_en_update_loopback_state(netdev, features);
}
if (reset) {
ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
features);
if (ret)
return ret;
}
return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
}
static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
int max_tx_rate)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
max_tx_rate);
}
static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}
static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}
static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
}
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_dev *mdev = priv->mdev->dev;
int i;
u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
if (!phys_port_id)
return -EOPNOTSUPP;
ppid->id_len = sizeof(phys_port_id);
for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
ppid->id[i] = phys_port_id & 0xff;
phys_port_id >>= 8;
}
return 0;
}
static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct udp_tunnel_info ti;
int ret;
udp_tunnel_nic_get_port(dev, table, 0, &ti);
priv->vxlan_port = ti.port;
ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
if (ret)
return ret;
return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC,
!!priv->vxlan_port);
}
static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
.sync_table = mlx4_udp_tunnel_sync,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
.tables = {
{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
},
};
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
/* The ConnectX-3 doesn't support outer IPv6 checksums but it does
* support inner IPv6 checksums and segmentation so we need to
* strip that feature if this is an IPv6 encapsulated frame.
*/
if (skb->encapsulation &&
(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->vxlan_port ||
(ip_hdr(skb)->version != 4) ||
(udp_hdr(skb)->dest != priv->vxlan_port))
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
return features;
}
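/* Per-queue TX rate limiting via the QP rate-limit attribute.  The hardware
* rate field is 12 bits wide, so rates that do not fit are expressed in
* Gb/s instead of Mb/s; a rate of zero removes the limit.
*/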
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
struct mlx4_update_qp_params params;
int err;
if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
return -EOPNOTSUPP;
/* The rate is provided in Mb/s; if it does not fit into 12 bits, use Gb/s */
if (maxrate >> 12) {
params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
params.rate_val = maxrate / 1000;
} else if (maxrate) {
params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
params.rate_val = maxrate;
} else { /* zero serves to revoke the QP rate-limitation */
params.rate_unit = 0;
params.rate_val = 0;
}
err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
&params);
return err;
}
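/* Attach or replace an XDP program.  If the number of XDP TX rings does not
* change, the per-ring program pointers are simply swapped under the state
* lock.  Otherwise the port profile is rebuilt with one XDP TX ring per RX
* ring (shrinking the regular TX ring count if the total would exceed
* MAX_TX_RINGS) and the port is restarted on the new resources.
*/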
static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct bpf_prog *old_prog;
struct mlx4_en_priv *tmp;
int tx_changed = 0;
int xdp_ring_num;
int port_up = 0;
int err;
int i;
xdp_ring_num = prog ? priv->rx_ring_num : 0;
/* No need to reconfigure buffers when simply swapping the
* program for a new one.
*/
if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
if (prog)
bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
priv->rx_ring[i]->xdp_prog,
lockdep_is_held(&mdev->state_lock));
rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
mutex_unlock(&mdev->state_lock);
return 0;
}
if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
return -EOPNOTSUPP;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
if (prog)
bpf_prog_add(prog, priv->rx_ring_num - 1);
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
tx_changed = 1;
new_prof.tx_ring_num[TX] =
MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
}
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
if (err) {
if (prog)
bpf_prog_sub(prog, priv->rx_ring_num - 1);
goto unlock_out;
}
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (tx_changed)
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
for (i = 0; i < priv->rx_ring_num; i++) {
old_prog = rcu_dereference_protected(
priv->rx_ring[i]->xdp_prog,
lockdep_is_held(&mdev->state_lock));
rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed starting port %d for XDP change\n",
priv->port);
if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
queue_work(mdev->workqueue, &priv->restart_task);
}
}
unlock_out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
return err;
}
static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx4_xdp_set(dev, xdp->prog);
default:
return -EINVAL;
}
}
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_eth_ioctl = mlx4_en_ioctl,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
.ndo_set_vf_mac = mlx4_en_set_vf_mac,
.ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
.ndo_set_vf_rate = mlx4_en_set_vf_rate,
.ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
};
static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
.xmo_rx_timestamp = mlx4_en_xdp_rx_timestamp,
.xmo_rx_hash = mlx4_en_xdp_rx_hash,
};
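/* Netdev notifier used for mlx4 port bonding: when both ethernet ports of
* the device are enslaved to the same bond (active-backup, XOR or 802.3ad
* with exactly two slaves), derive the virtual-to-physical port mapping
* from the bonding info and queue the bond work.
*/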
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
u8 port = 0;
struct mlx4_en_dev *mdev;
struct mlx4_dev *dev;
int i, num_eth_ports = 0;
bool do_bond = true;
u8 v2p_port1 = 0;
u8 v2p_port2 = 0;
if (!net_eq(dev_net(ndev), &init_net))
return NOTIFY_DONE;
mdev = container_of(this, struct mlx4_en_dev, netdev_nb);
dev = mdev->dev;
/* Go into this mode only when two network devices set on two ports
* of the same mlx4 device are slaves of the same bonding master
*/
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
++num_eth_ports;
if (!port && (mdev->pndev[i] == ndev))
port = i;
mdev->upper[i] = mdev->pndev[i] ?
netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
/* condition not met: network device is a slave */
if (!mdev->upper[i])
do_bond = false;
if (num_eth_ports < 2)
continue;
/* condition not met: same master */
if (mdev->upper[i] != mdev->upper[i-1])
do_bond = false;
}
/* condition not met: 2 slaves */
do_bond = (num_eth_ports == 2) ? do_bond : false;
/* handle only events that come with enough info */
if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
return NOTIFY_DONE;
if (do_bond) {
struct netdev_notifier_bonding_info *notifier_info = ptr;
struct netdev_bonding_info *bonding_info =
&notifier_info->bonding_info;
/* required mode 1, 2 or 4 */
if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
(bonding_info->master.bond_mode != BOND_MODE_XOR) &&
(bonding_info->master.bond_mode != BOND_MODE_8023AD))
do_bond = false;
/* require exactly 2 slaves */
if (bonding_info->master.num_slaves != 2)
do_bond = false;
/* calc v2p */
if (do_bond) {
if (bonding_info->master.bond_mode ==
BOND_MODE_ACTIVEBACKUP) {
/* in active-backup mode virtual ports are
* mapped to the physical port of the active
* slave */
if (bonding_info->slave.state ==
BOND_STATE_BACKUP) {
if (port == 1) {
v2p_port1 = 2;
v2p_port2 = 2;
} else {
v2p_port1 = 1;
v2p_port2 = 1;
}
} else { /* BOND_STATE_ACTIVE */
if (port == 1) {
v2p_port1 = 1;
v2p_port2 = 1;
} else {
v2p_port1 = 2;
v2p_port2 = 2;
}
}
} else { /* Active-Active */
/* in active-active mode a virtual port is
* mapped to the native physical port if and only
* if the physical port is up */
__s8 link = bonding_info->slave.link;
if (port == 1)
v2p_port2 = 2;
else
v2p_port1 = 1;
if ((link == BOND_LINK_UP) ||
(link == BOND_LINK_FAIL)) {
if (port == 1)
v2p_port1 = 1;
else
v2p_port2 = 2;
} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
if (port == 1)
v2p_port1 = 2;
else
v2p_port2 = 1;
}
}
}
}
mlx4_queue_bond_work(dev, do_bond, v2p_port1, v2p_port2);
return NOTIFY_DONE;
}
void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
if (!mlx4_is_slave(dev) &&
(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
mutex_lock(&stats_bitmap->mutex);
bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
if (rx_ppp)
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_PRIORITY_STATS_RX);
last_i += NUM_FLOW_PRIORITY_STATS_RX;
if (rx_pause && !(rx_ppp))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_STATS_RX);
last_i += NUM_FLOW_STATS_RX;
if (tx_ppp)
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_PRIORITY_STATS_TX);
last_i += NUM_FLOW_PRIORITY_STATS_TX;
if (tx_pause && !(tx_ppp))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_FLOW_STATS_TX);
last_i += NUM_FLOW_STATS_TX;
mutex_unlock(&stats_bitmap->mutex);
}
}
void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
struct mlx4_en_stats_bitmap *stats_bitmap,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause)
{
int last_i = 0;
mutex_init(&stats_bitmap->mutex);
bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
if (mlx4_is_slave(dev)) {
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_packets), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_packets), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
bitmap_set(stats_bitmap->bitmap, last_i +
MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
} else {
bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
}
last_i += NUM_MAIN_STATS;
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
last_i += NUM_PORT_STATS;
if (mlx4_is_master(dev))
bitmap_set(stats_bitmap->bitmap, last_i,
NUM_PF_STATS);
last_i += NUM_PF_STATS;
mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
rx_ppp, rx_pause,
tx_ppp, tx_pause);
last_i += NUM_FLOW_STATS;
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
last_i += NUM_PKT_STATS;
bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
last_i += NUM_XDP_STATS;
if (!mlx4_is_slave(dev))
bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
last_i += NUM_PHY_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
struct net_device *dev;
struct mlx4_en_priv *priv;
int i, t;
int err;
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
if (dev == NULL)
return -ENOMEM;
netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
dev->dev_port = port - 1;
/*
* Initialize driver private data
*/
priv = netdev_priv(dev);
memset(priv, 0, sizeof(struct mlx4_en_priv));
priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
#endif
priv->dev = dev;
priv->mdev = mdev;
priv->ddev = &mdev->pdev->dev;
priv->prof = prof;
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
priv->tx_ring_num[t] = prof->tx_ring_num[t];
if (!priv->tx_ring_num[t])
continue;
priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_tx_ring *),
GFP_KERNEL);
if (!priv->tx_ring[t]) {
err = -ENOMEM;
goto out;
}
priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
sizeof(struct mlx4_en_cq *),
GFP_KERNEL);
if (!priv->tx_cq[t]) {
err = -ENOMEM;
goto out;
}
}
priv->rx_ring_num = prof->rx_ring_num;
priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
priv->cqe_size = mdev->dev->caps.cqe_size;
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
u8 prio;
for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
priv->ets.prio_tc[prio] = prio;
priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
}
priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
priv->cee_config.pfc_state = false;
for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
en_info(priv, "enabling only PFC DCB ops\n");
dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
}
}
#endif
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
INIT_HLIST_HEAD(&priv->mac_hash[i]);
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
/* Set default MAC */
dev->addr_len = ETH_ALEN;
mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
} else if (mlx4_is_slave(priv->mdev->dev) &&
(priv->mdev->dev->port_random_macs & 1 << priv->port)) {
/* Random MAC was assigned in mlx4_slave_cap
* in mlx4_core module
*/
dev->addr_assign_type |= NET_ADDR_RANDOM;
en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
}
memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
/* Initialize time stamping config */
priv->hwtstamp_config.flags = 0;
priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
}
priv->allocated = 1;
/*
* Initialize netdev entry points
*/
if (mlx4_is_master(priv->mdev->dev))
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
dev->xdp_metadata_ops = &mlx4_xdp_metadata_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
* Set driver features
*/
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (mdev->LSO_support)
dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
if (mdev->dev->caps.tunnel_offload_mode ==
MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM |
NETIF_F_GSO_PARTIAL;
dev->udp_tunnel_nic_info = &mlx4_udp_tunnels;
}
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
dev->features |= NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_FILTER;
dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
}
if (mlx4_is_slave(mdev->dev)) {
bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
if (!err && phv) {
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
&vlan_offload_disabled);
if (!err && vlan_offload_disabled) {
dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_STAG_RX);
dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_STAG_RX);
}
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
}
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
dev->hw_features |= NETIF_F_RXFCS;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
dev->hw_features |= NETIF_F_RXALL;
if (mdev->dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED &&
mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
dev->hw_features |= NETIF_F_NTUPLE;
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
/* Setting a default hash function value */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
priv->rss_hash_fn = ETH_RSS_HASH_XOR;
} else {
en_warn(priv,
"No RSS hash capabilities exposed, using Toeplitz\n");
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
/* supports LSOv2 packets. */
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
priv->port, err);
goto out;
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
if (err) {
en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
err);
goto out;
}
}
/* Init port */
en_warn(priv, "Initializing port\n");
err = mlx4_INIT_PORT(mdev->dev, priv->port);
if (err) {
en_err(priv, "Failed Initializing port\n");
goto out;
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
/* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev->profile.prof[priv->port].rx_ppp,
mdev->profile.prof[priv->port].rx_pause,
mdev->profile.prof[priv->port].tx_ppp,
mdev->profile.prof[priv->port].tx_pause);
SET_NETDEV_DEVLINK_PORT(dev,
mlx4_get_devlink_port(mdev->dev, priv->port));
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
priv->registered = 1;
return 0;
out:
mlx4_en_destroy_netdev(dev);
return err;
}
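/* Rebuild the port resources when a configuration change (hardware
* time-stamping, RX VLAN stripping or RX FCS) requires it: allocate a new
* set of rings for the requested profile, stop the port if it was up, swap
* the resources in and restart.  RX VLAN offload is forced off while RX
* time-stamping is enabled, since the two cannot coexist.
*/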
int mlx4_en_reset_config(struct net_device *dev,
struct hwtstamp_config ts_config,
netdev_features_t features)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_port_profile new_prof;
struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
!DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
return 0; /* Nothing to change */
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
return -EINVAL;
}
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
mutex_lock(&mdev->state_lock);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
if (err)
goto out;
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
/* RX time-stamping is OFF, update the RX vlan offload
* to the latest wanted state
*/
if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
else
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
if (features & NETIF_F_RXFCS)
dev->features |= NETIF_F_RXFCS;
else
dev->features &= ~NETIF_F_RXFCS;
}
/* RX vlan offload and RX time-stamping can't co-exist!
* Regardless of the caller's choice,
* turn off RX vlan offload when time-stamping is ON
*/
if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
en_err(priv, "Failed starting port\n");
}
if (!err)
err = mlx4_en_moderation_update(priv);
out:
mutex_unlock(&mdev->state_lock);
kfree(tmp);
if (!err)
netdev_features_change(dev);
return err;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/export.h>
#include "mlx4.h"
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
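/* Firmware command wrappers for device-managed flow steering: ATTACH
* returns the new rule's register id in the command's immediate output,
* DETACH removes a rule by that id.
*/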
static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox,
u32 size,
u64 *reg_id)
{
u64 imm;
int err = 0;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
return err;
*reg_id = imm;
return err;
}
static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
int err = 0;
err = mlx4_cmd(dev, regid, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
return err;
}
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
u16 *hash, u8 op_mod)
{
u64 imm;
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
return err;
}
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_promisc_qp *pqp;
if (port < 1 || port > dev->caps.num_ports)
return NULL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
if (pqp->qpn == qpn)
return pqp;
}
/* not found */
return NULL;
}
/*
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
struct mlx4_steer_index *new_entry;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
INIT_LIST_HEAD(&new_entry->duplicates);
new_entry->index = index;
list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
/* If the given qpn is also a promisc qp,
* it should be inserted into the duplicates list
*/
pqp = get_promisc_qp(dev, port, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
if (!dqp) {
err = -ENOMEM;
goto out_alloc;
}
dqp->qpn = qpn;
list_add_tail(&dqp->list, &new_entry->duplicates);
}
/* if no promisc qps for this vep, we are done */
if (list_empty(&s_steer->promisc_qps[steer]))
return 0;
/* now need to add all the promisc qps to the new
* steering entry, as they should also receive the packets
* destined to this address */
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = -ENOMEM;
goto out_alloc;
}
mgm = mailbox->buf;
err = mlx4_READ_ENTRY(dev, index, mailbox);
if (err)
goto out_mailbox;
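/* members_count packs two fields into one big-endian word: the low 24 bits
* hold the number of attached QPs and the top two bits hold the protocol,
* which is why the count is masked with 0xffffff, the protocol recovered
* with ">> 30", and the pair re-combined as "count | (prot << 30)" on write.
*/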
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
prot = be32_to_cpu(mgm->members_count) >> 30;
list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
}
/* add the qpn */
mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
}
/* update the qps count and update the entry with all the promisc qps */
mgm->members_count = cpu_to_be32(members_count | (prot << 30));
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
out_mailbox:
mlx4_free_cmd_mailbox(dev, mailbox);
if (!err)
return 0;
out_alloc:
if (dqp) {
list_del(&dqp->list);
kfree(dqp);
}
list_del(&new_entry->list);
kfree(new_entry);
return err;
}
/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, port, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
if (tmp_entry->index == index) {
entry = tmp_entry;
break;
}
}
if (unlikely(!entry)) {
mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
return -EINVAL;
}
/* the given qpn is listed as a promisc qpn,
* so we need to add it as a duplicate to this entry
* for future reference */
list_for_each_entry(dqp, &entry->duplicates, list) {
if (qpn == dqp->qpn)
return 0; /* qp is already duplicated */
}
/* add the qp as a duplicate on this index */
dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
if (!dqp)
return -ENOMEM;
dqp->qpn = qpn;
list_add_tail(&dqp->list, &entry->duplicates);
return 0;
}
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
if (port < 1 || port > dev->caps.num_ports)
return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
if (!get_promisc_qp(dev, port, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
* Find the index entry, and remove the duplicate */
list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
if (tmp_entry->index == index) {
entry = tmp_entry;
break;
}
}
if (unlikely(!entry)) {
mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
return false;
}
list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
if (dqp->qpn == qpn) {
list_del(&dqp->list);
kfree(dqp);
}
}
return true;
}
/* Returns true if all the QPs != tqpn contained in this entry
* are Promisc QPs. Returns false otherwise.
*/
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn,
u32 *members_count)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 m_count;
bool ret = false;
int i;
if (port < 1 || port > dev->caps.num_ports)
return false;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return false;
mgm = mailbox->buf;
if (mlx4_READ_ENTRY(dev, index, mailbox))
goto out;
m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count)
*members_count = m_count;
for (i = 0; i < m_count; i++) {
u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
}
ret = true;
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *entry = NULL, *tmp_entry;
u32 members_count;
bool ret = false;
if (port < 1 || port > dev->caps.num_ports)
return false;
s_steer = &mlx4_priv(dev)->steer[port - 1];
if (!promisc_steering_entry(dev, port, steer, index,
tqpn, &members_count))
goto out;
/* All the qps currently registered for this entry are promiscuous;
* check for duplicates */
ret = true;
list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
if (entry->index == index) {
if (list_empty(&entry->duplicates) ||
members_count == 1) {
struct mlx4_promisc_qp *pqp, *tmp_pqp;
/* If there is only 1 entry in duplicates then
* this is the QP we want to delete; go over
* the list and delete the entry.
*/
list_del(&entry->list);
list_for_each_entry_safe(pqp, tmp_pqp,
&entry->duplicates,
list) {
list_del(&pqp->list);
kfree(pqp);
}
kfree(entry);
} else {
/* This entry contains duplicates so it shouldn't be removed */
ret = false;
goto out;
}
}
}
out:
return ret;
}
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
struct mlx4_steer_index *entry;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
u32 members_count;
u32 prot;
int i;
bool found;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
if (get_promisc_qp(dev, port, steer, qpn)) {
err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
pqp = kmalloc(sizeof(*pqp), GFP_KERNEL);
if (!pqp) {
err = -ENOMEM;
goto out_mutex;
}
pqp->qpn = qpn;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = -ENOMEM;
goto out_alloc;
}
mgm = mailbox->buf;
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
/* The promisc QP needs to be added for each one of the steering
* entries. If it already exists, it needs to be added as
* a duplicate for this entry.
*/
list_for_each_entry(entry,
&s_steer->steer_entries[steer],
list) {
err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
if (err)
goto out_mailbox;
members_count = be32_to_cpu(mgm->members_count) &
0xffffff;
prot = be32_to_cpu(mgm->members_count) >> 30;
found = false;
for (i = 0; i < members_count; i++) {
if ((be32_to_cpu(mgm->qp[i]) &
MGM_QPN_MASK) == qpn) {
/* Entry already exists.
* Add to duplicates.
*/
dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
if (!dqp) {
err = -ENOMEM;
goto out_mailbox;
}
dqp->qpn = qpn;
list_add_tail(&dqp->list,
&entry->duplicates);
found = true;
}
}
if (!found) {
/* Need to add the qpn to mgm */
if (members_count ==
dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
}
mgm->qp[members_count++] =
cpu_to_be32(qpn & MGM_QPN_MASK);
mgm->members_count =
cpu_to_be32(members_count |
(prot << 30));
err = mlx4_WRITE_ENTRY(dev, entry->index,
mailbox);
if (err)
goto out_mailbox;
}
}
}
/* add the new qpn to list of promisc qps */
list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
/* now need to add all the promisc qps to default entry */
memset(mgm, 0, sizeof(*mgm));
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_list;
}
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
}
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
mlx4_free_cmd_mailbox(dev, mailbox);
mutex_unlock(&priv->mcg_table.mutex);
return 0;
out_list:
list_del(&pqp->list);
out_mailbox:
mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
kfree(pqp);
out_mutex:
mutex_unlock(&priv->mcg_table.mutex);
return err;
}
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_steer *s_steer;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
struct mlx4_steer_index *entry, *tmp_entry;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
u32 members_count;
bool found;
bool back_to_list = false;
int i;
int err;
if (port < 1 || port > dev->caps.num_ports)
return -EINVAL;
s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
pqp = get_promisc_qp(dev, port, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
err = 0;
goto out_mutex;
}
/* remove from list of promisc qps */
list_del(&pqp->list);
/* set the default entry not to include the removed one */
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = -ENOMEM;
back_to_list = true;
goto out_list;
}
mgm = mailbox->buf;
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
/* Remove the QP from all the steering entries */
list_for_each_entry_safe(entry, tmp_entry,
&s_steer->steer_entries[steer],
list) {
found = false;
list_for_each_entry(dqp, &entry->duplicates, list) {
if (dqp->qpn == qpn) {
found = true;
break;
}
}
if (found) {
/* A duplicate, no need to change the MGM,
* only update the duplicates list
*/
list_del(&dqp->list);
kfree(dqp);
} else {
int loc = -1;
err = mlx4_READ_ENTRY(dev,
entry->index,
mailbox);
if (err)
goto out_mailbox;
members_count =
be32_to_cpu(mgm->members_count) &
0xffffff;
if (!members_count) {
mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
qpn, entry->index);
list_del(&entry->list);
kfree(entry);
continue;
}
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) &
MGM_QPN_MASK) == qpn) {
loc = i;
break;
}
if (loc < 0) {
mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
qpn, entry->index);
err = -EINVAL;
goto out_mailbox;
}
/* Copy the last QP in this MGM
* over removed QP
*/
mgm->qp[loc] = mgm->qp[members_count - 1];
mgm->qp[members_count - 1] = 0;
mgm->members_count =
cpu_to_be32(--members_count |
(MLX4_PROT_ETH << 30));
err = mlx4_WRITE_ENTRY(dev,
entry->index,
mailbox);
if (err)
goto out_mailbox;
}
}
}
out_mailbox:
mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
if (back_to_list)
list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
else
kfree(pqp);
out_mutex:
mutex_unlock(&priv->mcg_table.mutex);
return err;
}
/*
* Caller must hold MCG table semaphore. gid and mgm parameters must
* be properly aligned for command interface.
*
* Returns 0 unless a firmware command error occurs.
*
* If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
* and *mgm holds MGM entry.
*
* if GID is found in AMGM, *index = index in AMGM, *prev = index of
* previous entry in hash chain and *mgm holds AMGM entry.
*
* If no AMGM exists for given gid, *index = -1, *prev = index of last
* entry in hash chain and *mgm holds end of hash chain.
*/
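/*
* Illustrative walk of the lookup below (values are made up): a GID that
* hashes to 0x12 is first read from the MGM at index 0x12; if the GID there
* does not match, next_gid_index >> 6 gives the next AMGM index to read,
* and the loop repeats until the GID matches, an empty entry is found, or
* the next index is zero (end of the hash chain, *index is set to -1).
*/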
static int find_entry(struct mlx4_dev *dev, u8 port,
u8 *gid, enum mlx4_protocol prot,
struct mlx4_cmd_mailbox *mgm_mailbox,
int *prev, int *index)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
u16 hash;
u8 op_mod = (prot == MLX4_PROT_ETH) ?
!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return -ENOMEM;
mgid = mailbox->buf;
memcpy(mgid, gid, 16);
err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
return err;
if (0)
mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);
*index = hash;
*prev = -1;
do {
err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
if (err)
return err;
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) {
mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL;
}
return err;
}
if (!memcmp(mgm->gid, gid, 16) &&
be32_to_cpu(mgm->members_count) >> 30 == prot)
return err;
*prev = *index;
*index = be32_to_cpu(mgm->next_gid_index) >> 6;
} while (*index);
*index = -1;
return err;
}
static const u8 __promisc_mode[] = {
[MLX4_FS_REGULAR] = 0x0,
[MLX4_FS_ALL_DEFAULT] = 0x1,
[MLX4_FS_MC_DEFAULT] = 0x3,
[MLX4_FS_MIRROR_RX_PORT] = 0x4,
[MLX4_FS_MIRROR_SX_PORT] = 0x5,
[MLX4_FS_UC_SNIFFER] = 0x6,
[MLX4_FS_MC_SNIFFER] = 0x7,
};
int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
enum mlx4_net_trans_promisc_mode flow_type)
{
if (flow_type >= MLX4_FS_MODE_NUM) {
mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
return -EINVAL;
}
return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
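/* Usage sketch (illustrative): callers use this helper to translate the
* software promisc mode into the value programmed into a hardware rule,
* e.g. mlx4_map_sw_to_hw_steering_mode(dev, MLX4_FS_MC_DEFAULT) returns 0x3
* per the __promisc_mode[] table above, while an out-of-range mode yields
* -EINVAL.
*/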
static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
struct mlx4_net_trans_rule_hw_ctrl *hw)
{
u8 flags = 0;
flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
flags |= ctrl->exclusive ? (1 << 2) : 0;
flags |= ctrl->allow_loopback ? (1 << 3) : 0;
hw->flags = flags;
hw->type = __promisc_mode[ctrl->promisc_mode];
hw->prio = cpu_to_be16(ctrl->priority);
hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
const u16 __sw_id_hw[] = {
[MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
[MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
[MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
[MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
[MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006,
[MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};
int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
static const int __rule_hw_sz[] = {
[MLX4_NET_TRANS_RULE_ID_ETH] =
sizeof(struct mlx4_net_trans_rule_hw_eth),
[MLX4_NET_TRANS_RULE_ID_IB] =
sizeof(struct mlx4_net_trans_rule_hw_ib),
[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
[MLX4_NET_TRANS_RULE_ID_IPV4] =
sizeof(struct mlx4_net_trans_rule_hw_ipv4),
[MLX4_NET_TRANS_RULE_ID_TCP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_UDP] =
sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
[MLX4_NET_TRANS_RULE_ID_VXLAN] =
sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};
int mlx4_hw_rule_sz(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id)
{
if (id >= MLX4_NET_TRANS_RULE_NUM) {
mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
return -EINVAL;
}
return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
struct _rule_hw *rule_hw)
{
if (mlx4_hw_rule_sz(dev, spec->id) < 0)
return -EINVAL;
memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
switch (spec->id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
ETH_ALEN);
memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
ETH_ALEN);
if (spec->eth.ether_type_enable) {
rule_hw->eth.ether_type_enable = 1;
rule_hw->eth.ether_type = spec->eth.ether_type;
}
rule_hw->eth.vlan_tag = spec->eth.vlan_id;
rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
break;
case MLX4_NET_TRANS_RULE_ID_IB:
rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
break;
case MLX4_NET_TRANS_RULE_ID_IPV6:
return -EOPNOTSUPP;
case MLX4_NET_TRANS_RULE_ID_IPV4:
rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
break;
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
break;
case MLX4_NET_TRANS_RULE_ID_VXLAN:
rule_hw->vxlan.vni =
cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
rule_hw->vxlan.vni_mask =
cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
break;
default:
return -EINVAL;
}
return __rule_hw_sz[spec->id];
}
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
struct mlx4_spec_list *cur;
char buf[BUF_SIZE];
int len = 0;
mlx4_err(dev, "%s", str);
len += scnprintf(buf + len, BUF_SIZE - len,
"port = %d prio = 0x%x qp = 0x%x ",
rule->port, rule->priority, rule->qpn);
list_for_each_entry(cur, &rule->list, list) {
switch (cur->id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
len += scnprintf(buf + len, BUF_SIZE - len,
"dmac = %pM ", &cur->eth.dst_mac);
if (cur->eth.ether_type)
len += scnprintf(buf + len, BUF_SIZE - len,
"ethertype = 0x%x ",
be16_to_cpu(cur->eth.ether_type));
if (cur->eth.vlan_id)
len += scnprintf(buf + len, BUF_SIZE - len,
"vlan-id = %d ",
be16_to_cpu(cur->eth.vlan_id));
break;
case MLX4_NET_TRANS_RULE_ID_IPV4:
if (cur->ipv4.src_ip)
len += scnprintf(buf + len, BUF_SIZE - len,
"src-ip = %pI4 ",
&cur->ipv4.src_ip);
if (cur->ipv4.dst_ip)
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-ip = %pI4 ",
&cur->ipv4.dst_ip);
break;
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
if (cur->tcp_udp.src_port)
len += scnprintf(buf + len, BUF_SIZE - len,
"src-port = %d ",
be16_to_cpu(cur->tcp_udp.src_port));
if (cur->tcp_udp.dst_port)
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-port = %d ",
be16_to_cpu(cur->tcp_udp.dst_port));
break;
case MLX4_NET_TRANS_RULE_ID_IB:
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-gid = %pI6\n", cur->ib.dst_gid);
len += scnprintf(buf + len, BUF_SIZE - len,
"dst-gid-mask = %pI6\n",
cur->ib.dst_gid_msk);
break;
case MLX4_NET_TRANS_RULE_ID_VXLAN:
len += scnprintf(buf + len, BUF_SIZE - len,
"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
break;
case MLX4_NET_TRANS_RULE_ID_IPV6:
break;
default:
break;
}
}
len += scnprintf(buf + len, BUF_SIZE - len, "\n");
mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE)
mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_spec_list *cur;
u32 size = 0;
int ret;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (!mlx4_qp_lookup(dev, rule->qpn)) {
mlx4_err_rule(dev, "QP doesn't exist\n", rule);
ret = -EINVAL;
goto out;
}
trans_rule_ctrl_to_hw(rule, mailbox->buf);
size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
if (ret < 0)
goto out;
size += ret;
}
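/* The rule size was accumulated above in bytes (control segment plus each
* parsed spec); it is handed to the firmware shifted right by two, i.e.
* presumably in 4-byte units, the same convention used for rule_hw->size in
* parse_trans_rule().
*/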
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM) {
mlx4_err_rule(dev,
"mcg table is full. Fail to register network rule\n",
rule);
} else if (ret) {
if (ret == -ENXIO) {
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
mlx4_err_rule(dev,
"DMFS is not enabled, "
"failed to register network rule.\n",
rule);
else
mlx4_err_rule(dev,
"Rule exceeds the dmfs_high_rate_mode limitations, "
"failed to register network rule.\n",
rule);
} else {
mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
}
}
out:
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
int err;
err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
if (err)
mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
reg_id);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
struct mlx4_spec_list spec_eth_outer = { {NULL} };
struct mlx4_spec_list spec_vxlan = { {NULL} };
struct mlx4_spec_list spec_eth_inner = { {NULL} };
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_REGULAR,
};
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
rule.port = port;
rule.qpn = qpn;
rule.priority = prio;
INIT_LIST_HEAD(&rule.list);
spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
list_add_tail(&spec_eth_outer.list, &rule.list);
list_add_tail(&spec_vxlan.list, &rule.list);
list_add_tail(&spec_eth_inner.list, &rule.list);
err = mlx4_flow_attach(dev, &rule, reg_id);
return err;
}
EXPORT_SYMBOL(mlx4_tunnel_steer_add);
int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
u32 max_range_qpn)
{
int err;
u64 in_param;
in_param = ((u64) min_range_qpn) << 32;
in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
err = mlx4_cmd(dev, in_param, 0, 0,
MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
int index = -1, prev;
int link = 0;
int i;
int err;
u8 port = gid[5];
u8 new_entry = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
err = find_entry(dev, port, gid, prot,
mailbox, &prev, &index);
if (err)
goto out;
if (index != -1) {
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
new_entry = 1;
memcpy(mgm->gid, gid, 16);
}
} else {
link = 1;
index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
if (index == -1) {
mlx4_err(dev, "No AMGM entries left\n");
err = -ENOMEM;
goto out;
}
index += dev->caps.num_mgms;
new_entry = 1;
memset(mgm, 0, sizeof(*mgm));
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM;
goto out;
}
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
err = 0;
goto out;
}
if (block_mcast_loopback)
mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
(1U << MGM_BLCK_LB_BIT));
else
mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
if (err)
goto out;
if (!link)
goto out;
err = mlx4_READ_ENTRY(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(index << 6);
err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
if (err)
goto out;
out:
if (prot == MLX4_PROT_ETH && index != -1) {
/* manage the steering entry for promisc mode */
if (new_entry)
err = new_steering_entry(dev, port, steer,
index, qp->qpn);
else
err = existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms, MLX4_USE_RR);
}
mutex_unlock(&priv->mcg_table.mutex);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
int prev, index;
int i, loc = -1;
int err;
u8 port = gid[5];
bool removed_entry = false;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mgm = mailbox->buf;
mutex_lock(&priv->mcg_table.mutex);
err = find_entry(dev, port, gid, prot,
mailbox, &prev, &index);
if (err)
goto out;
if (index == -1) {
mlx4_err(dev, "MGID %pI6 not found\n", gid);
err = -EINVAL;
goto out;
}
/* If this QP is also a promisc QP, it should not be removed from the
* MGM as long as at least one non-promisc QP is still attached to this MCG
*/
if (prot == MLX4_PROT_ETH &&
check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
!promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; ++i)
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
loc = i;
break;
}
if (loc == -1) {
mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
err = -EINVAL;
goto out;
}
/* copy the last QP in this MGM over removed QP */
mgm->qp[loc] = mgm->qp[members_count - 1];
mgm->qp[members_count - 1] = 0;
mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
if (prot == MLX4_PROT_ETH)
removed_entry = can_remove_steering_entry(dev, port, steer,
index, qp->qpn);
if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
}
/* We are going to delete the entry, members count should be 0 */
mgm->members_count = cpu_to_be32((u32) prot << 30);
if (prev == -1) {
/* Remove entry from MGM */
int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
if (amgm_index) {
err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
if (err)
goto out;
} else
memset(mgm->gid, 0, 16);
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
if (err)
goto out;
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
}
} else {
/* Remove entry from AMGM */
int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
err = mlx4_READ_ENTRY(dev, prev, mailbox);
if (err)
goto out;
mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
if (err)
goto out;
if (index < dev->caps.num_mgms)
mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
index - dev->caps.num_mgms, MLX4_USE_RR);
}
out:
mutex_unlock(&priv->mcg_table.mutex);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
/* If the device is in an internal error state, report success for this teardown command */
err = 0;
return err;
}
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], u8 attach, u8 block_loopback,
enum mlx4_protocol prot)
{
struct mlx4_cmd_mailbox *mailbox;
int err = 0;
int qpn;
if (!mlx4_is_mfunc(dev))
return -EBADF;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, gid, 16);
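/* Pack the command argument below: the QP number in the low bits, the
* protocol shifted up to bit 28 and, when attaching, bit 31 set to request
* blocking of multicast loopback for this QP.
*/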
qpn = qp->qpn;
qpn |= (prot << 28);
if (attach && block_loopback)
qpn |= (1 << 31);
err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err && !attach &&
dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
err = 0;
return err;
}
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], u8 port,
int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id)
{
struct mlx4_spec_list spec = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.promisc_mode = MLX4_FS_REGULAR,
.priority = MLX4_DOMAIN_NIC,
};
rule.allow_loopback = !block_mcast_loopback;
rule.port = port;
rule.qpn = qp->qpn;
INIT_LIST_HEAD(&rule.list);
switch (prot) {
case MLX4_PROT_ETH:
spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
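/* mac_mask above is MLX4_MAC_MASK (assuming its usual 48-bit all-ones
* definition) shifted into the top six bytes of a big-endian u64, so the
* ETH_ALEN-byte copy below gives an all-ones destination MAC mask, i.e. an
* exact match on the multicast MAC taken from gid[10..15].
*/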
memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
break;
case MLX4_PROT_IB_IPV6:
spec.id = MLX4_NET_TRANS_RULE_ID_IB;
memcpy(spec.ib.dst_gid, gid, 16);
memset(&spec.ib.dst_gid_msk, 0xff, 16);
break;
default:
return -EINVAL;
}
list_add_tail(&spec.list, &rule.list);
return mlx4_flow_attach(dev, &rule, reg_id);
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
u8 port, int block_mcast_loopback,
enum mlx4_protocol prot, u64 *reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
fallthrough;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_MC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 1,
block_mcast_loopback, prot);
return mlx4_qp_attach_common(dev, qp, gid,
block_mcast_loopback, prot,
MLX4_MC_STEER);
case MLX4_STEERING_MODE_DEVICE_MANAGED:
return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
block_mcast_loopback,
prot, reg_id);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_A0:
if (prot == MLX4_PROT_ETH)
return 0;
fallthrough;
case MLX4_STEERING_MODE_B0:
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_MC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot,
MLX4_MC_STEER);
case MLX4_STEERING_MODE_DEVICE_MANAGED:
return mlx4_flow_detach(dev, reg_id);
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
};
u64 *regid_p;
switch (mode) {
case MLX4_FS_ALL_DEFAULT:
regid_p = &dev->regid_promisc_array[port];
break;
case MLX4_FS_MC_DEFAULT:
regid_p = &dev->regid_allmulti_array[port];
break;
default:
return -1;
}
if (*regid_p != 0)
return -1;
rule.promisc_mode = mode;
rule.port = port;
rule.qpn = qpn;
INIT_LIST_HEAD(&rule.list);
mlx4_info(dev, "going promisc on %x\n", port);
return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
enum mlx4_net_trans_promisc_mode mode)
{
int ret;
u64 *regid_p;
switch (mode) {
case MLX4_FS_ALL_DEFAULT:
regid_p = &dev->regid_promisc_array[port];
break;
case MLX4_FS_MC_DEFAULT:
regid_p = &dev->regid_allmulti_array[port];
break;
default:
return -1;
}
if (*regid_p == 0)
return -1;
ret = mlx4_flow_detach(dev, *regid_p);
if (ret == 0)
*regid_p = 0;
return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
int mlx4_unicast_attach(struct mlx4_dev *dev,
struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 1,
block_mcast_loopback, prot);
return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot)
{
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
if (mlx4_is_mfunc(dev))
return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
u32 qpn = (u32) vhcr->in_param & 0xffffffff;
int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
enum mlx4_steer_type steer = vhcr->in_modifier;
if (port < 0)
return -EINVAL;
/* Promiscuous unicast is not allowed in mfunc */
if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
return 0;
if (vhcr->op_modifier)
return add_promisc_qp(dev, port, steer, qpn);
else
return remove_promisc_qp(dev, port, steer, qpn);
}
static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
enum mlx4_steer_type steer, u8 add, u8 port)
{
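/* The 64-bit input parameter carries the QP number in its low bits and the
* port in bits 63:62, mirroring the decode in mlx4_PROMISC_wrapper() above.
*/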
return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
/* No need for the mcg_table when FW manages the MCG table */
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
return 0;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
return err;
mutex_init(&priv->mcg_table.mutex);
return 0;
}
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
if (dev->caps.steering_mode !=
MLX4_STEERING_MODE_DEVICE_MANAGED)
mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/mcg.c
|
/*
* Copyright (c) 2012 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/device.h>
#include <linux/clocksource.h>
#include "mlx4_en.h"
/* mlx4_en_read_clock - read raw cycle counter (to be used by time counter)
*/
static u64 mlx4_en_read_clock(const struct cyclecounter *tc)
{
struct mlx4_en_dev *mdev =
container_of(tc, struct mlx4_en_dev, cycles);
struct mlx4_dev *dev = mdev->dev;
return mlx4_read_clock(dev) & tc->mask;
}
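/* Reassemble the CQE timestamp: the low 16 bits come from timestamp_lo and
* the remaining bits from timestamp_hi shifted up by 16; when timestamp_lo
* reads back as zero, one is added to the high part (the "+ !lo" term),
* presumably to keep the combined value consistent across the low-word
* rollover.
*/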
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
{
u64 hi, lo;
struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
lo = (u64)be16_to_cpu(ts_cqe->timestamp_lo);
hi = ((u64)be32_to_cpu(ts_cqe->timestamp_hi) + !lo) << 16;
return hi | lo;
}
u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
unsigned int seq;
u64 nsec;
do {
seq = read_seqbegin(&mdev->clock_lock);
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
} while (read_seqretry(&mdev->clock_lock, seq));
return ns_to_ktime(nsec);
}
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp)
{
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
/**
* mlx4_en_remove_timestamp - disable PTP device
* @mdev: board private structure
*
* Stop the PTP support.
**/
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
{
if (mdev->ptp_clock) {
ptp_clock_unregister(mdev->ptp_clock);
mdev->ptp_clock = NULL;
mlx4_info(mdev, "removed PHC\n");
}
}
#define MLX4_EN_WRAP_AROUND_SEC 10UL
/* By scheduling the overflow check every 5 seconds, we have a reasonably
* good chance we won't miss a wrap around.
* TODO: Use a timer instead of a work queue to increase the guarantee.
*/
#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
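/* Worked out: MLX4_EN_OVERFLOW_PERIOD = 10UL * HZ / 2 = 5 * HZ jiffies,
* i.e. the five-second check interval mentioned above, which is half of
* the MLX4_EN_WRAP_AROUND_SEC window.
*/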
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
MLX4_EN_OVERFLOW_PERIOD);
unsigned long flags;
if (timeout) {
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
mdev->last_overflow_check = jiffies;
}
}
/**
* mlx4_en_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
* @scaled_ppm: Desired frequency change in scaled parts per million
*
* Adjust the frequency of the PHC cycle counter by the indicated scaled_ppm
* from the base frequency.
*
* Scaled parts per million is ppm with a 16-bit binary fractional field.
**/
static int mlx4_en_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
u32 mult;
unsigned long flags;
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
mult = (u32)adjust_by_scaled_ppm(mdev->nominal_c_mult, scaled_ppm);
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
mdev->cycles.mult = mult;
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
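/* Example (illustrative): scaled_ppm carries a 16-bit binary fraction, so a
* request of 65536 corresponds to +1 ppm and adjust_by_scaled_ppm() scales
* nominal_c_mult by roughly (1 + 1/10^6); the adjusted multiplier is then
* published under clock_lock so timecounter readers see a consistent mult.
*/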
/**
* mlx4_en_phc_adjtime - Shift the time of the hardware clock
* @ptp: ptp clock structure
* @delta: Desired change in nanoseconds
*
* Adjust the timer by resetting the timecounter structure.
**/
static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
unsigned long flags;
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_adjtime(&mdev->clock, delta);
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
/**
* mlx4_en_phc_gettime - Reads the current time from the hardware clock
* @ptp: ptp clock structure
* @ts: timespec structure to hold the current time value
*
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
{
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
unsigned long flags;
u64 ns;
write_seqlock_irqsave(&mdev->clock_lock, flags);
ns = timecounter_read(&mdev->clock);
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
*ts = ns_to_timespec64(ns);
return 0;
}
/**
* mlx4_en_phc_settime - Set the current time on the hardware clock
* @ptp: ptp clock structure
* @ts: timespec containing the new time for the cycle counter
*
* Reset the timecounter to use a new base value instead of the kernel
* wall timer value.
**/
static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
{
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
u64 ns = timespec64_to_ns(ts);
unsigned long flags;
/* reset the timecounter */
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles, ns);
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
}
/**
* mlx4_en_phc_enable - enable or disable an ancillary feature
* @ptp: ptp clock structure
* @request: Desired resource to enable or disable
* @on: Caller passes one to enable or zero to disable
*
* Enable (or disable) ancillary features of the PHC subsystem.
* Currently, no ancillary features are supported.
**/
static int mlx4_en_phc_enable(struct ptp_clock_info __always_unused *ptp,
struct ptp_clock_request __always_unused *request,
int __always_unused on)
{
return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.owner = THIS_MODULE,
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
.adjfine = mlx4_en_phc_adjfine,
.adjtime = mlx4_en_phc_adjtime,
.gettime64 = mlx4_en_phc_gettime,
.settime64 = mlx4_en_phc_settime,
.enable = mlx4_en_phc_enable,
};
/* This function calculates the max shift that still allows
* MLX4_EN_WRAP_AROUND_SEC seconds' worth of the cycles register to be
* converted without 64-bit overflow.
*/
static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);
/* This comes from the reverse of clocksource_khz2mult */
return ilog2(div_u64(max_mul * freq_khz, 1000000));
}
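/* Worked example (hypothetical 512 MHz core clock): freq_khz = 512000, so
* max_val_cycles = 512000 * 1000 * 10 = 5.12e9, rounded up to 2^33;
* max_mul = ULLONG_MAX / 2^33 is roughly 2^31, and the returned shift is
* ilog2(2^31 * 512000 / 10^6) = ilog2(~1.1e9) = 30. A real device would
* plug its own hca_core_clock into the same arithmetic.
*/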
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common to all ports, so skip initialization if it
* was already done for another port.
*/
if (mdev->ptp_clock)
return;
seqlock_init(&mdev->clock_lock);
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
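/* Cyclecounter setup: the timecounter layer converts raw 48-bit cycle
* values returned by mlx4_en_read_clock() into nanoseconds roughly as
* ns = (cycles * mult) >> shift, so shift is derived from the core clock
* frequency and mult is computed by clocksource_khz2mult() to match it.
*/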
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev->nominal_c_mult = mdev->cycles.mult;
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_init(&mdev->clock, &mdev->cycles,
ktime_to_ns(ktime_get_real()));
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
/* Configure the PHC */
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
mdev->ptp_clock = ptp_clock_register(&mdev->ptp_clock_info,
&mdev->pdev->dev);
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
mlx4_err(mdev, "ptp_clock_register failed\n");
} else if (mdev->ptp_clock) {
mlx4_info(mdev, "registered PHC clock\n");
}
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_clock.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>
#include <uapi/rdma/mlx4-abi.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG
int mlx4_debug_level; /* 0 by default */
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");
#else /* CONFIG_PCI_MSI */
#define msi_x (0)
#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
"num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
"probe_vf=port1,port2,port1+2");
static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size that defines the num"
" of qp per mcg, for example:"
" 10 gives 248. Range: 7 <="
" log_num_mgm_entry_size <= 12."
" To activate device managed"
" flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
"Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)");
#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \
MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
MLX4_FUNC_CAP_DMFS_A0_STATIC)
#define RESET_PERSIST_MASK_FLAGS (MLX4_FLAG_SRIOV)
static char mlx4_version[] =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION "\n";
static const struct mlx4_profile default_profile = {
.num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 19,
.num_mtt = 1 << 20, /* It is really the number of MTT segments */
};
static const struct mlx4_profile low_mem_profile = {
.num_qp = 1 << 17,
.num_srq = 1 << 6,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 8,
.num_mcg = 1 << 8,
.num_mpt = 1 << 9,
.num_mtt = 1 << 7,
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
"(0-7) (default: 0)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
"1 for IB, 2 for Ethernet");
struct mlx4_port_config {
struct list_head list;
enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
struct pci_dev *pdev;
};
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
ctx->val.vbool = !!mlx4_internal_err_reset;
return 0;
}
static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
mlx4_internal_err_reset = ctx->val.vbool;
return 0;
}
static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
ctx->val.vbool = dev->persist->crdump.snapshot_enable;
return 0;
}
static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
dev->persist->crdump.snapshot_enable = ctx->val.vbool;
return 0;
}
static int
mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
u32 value = val.vu32;
if (value < 1 || value > 128)
return -ERANGE;
if (!is_power_of_2(value)) {
NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
return -EINVAL;
}
return 0;
}
enum mlx4_devlink_param_id {
MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
};
static const struct devlink_param mlx4_devlink_params[] = {
DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
mlx4_devlink_ierr_reset_get,
mlx4_devlink_ierr_reset_set, NULL),
DEVLINK_PARAM_GENERIC(MAX_MACS,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx4_devlink_max_macs_validate),
DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
mlx4_devlink_crdump_snapshot_get,
mlx4_devlink_crdump_snapshot_set, NULL),
DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
"enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
"enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
};
static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
value);
value.vu32 = 1UL << log_num_mac;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
value);
value.vbool = enable_64b_cqe_eqe;
devl_param_driverinit_value_set(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
value);
value.vbool = enable_4k_uar;
devl_param_driverinit_value_set(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
value);
value.vbool = false;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
/* reserved_uars is calculated in system page size units, so an
* adjustment is needed when the UAR page size is smaller than the
* system page size
*/
dev->caps.reserved_uars =
max_t(int,
mlx4_get_num_reserved_uar(dev),
dev_cap->reserved_uars /
(1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
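/* Worked example (hypothetical numbers): with a 64 KB kernel page size and
* 4 KB UAR pages, PAGE_SHIFT - uar_page_shift = 4, so the firmware value is
* divided by 1 << 4 = 16; the result is still floored at
* mlx4_get_num_reserved_uar(dev) by the max_t() above.
*/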
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
int i;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EOPNOTSUPP;
}
}
}
for (i = 0; i < dev->caps.num_ports; i++) {
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
i + 1);
return -EOPNOTSUPP;
}
}
return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
for (i = 1; i <= dev->caps.num_ports; ++i)
dev->caps.port_mask[i] = dev->caps.port_type[i];
}
enum {
MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};
static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err = 0;
struct mlx4_func func;
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
err = mlx4_QUERY_FUNC(dev, &func, 0);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
return err;
}
dev_cap->max_eqs = func.max_eq;
dev_cap->reserved_eqs = func.rsvd_eqs;
dev_cap->reserved_uars = func.rsvd_uars;
err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
}
return err;
}
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
struct mlx4_caps *dev_cap = &dev->caps;
/* FW not supporting or cancelled by user */
if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
return;
/* Must have 64B CQE_EQE enabled by FW to use bigger stride
* When FW has NCSI it may decide not to report 64B CQE/EQEs
*/
if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
return;
}
if (cache_line_size() == 128 || cache_line_size() == 256) {
mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
/* Changing the real data inside CQE size to 32B */
dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
if (mlx4_is_master(dev))
dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
} else {
if (cache_line_size() != 32 && cache_line_size() != 64)
mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
}
}
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
struct mlx4_port_cap *port_cap)
{
dev->caps.vl_cap[port] = port_cap->max_vl;
dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
/* set gid and pkey table operating lengths by default
* to non-sriov values
*/
dev->caps.gid_table_len[port] = port_cap->max_gids;
dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
dev->caps.port_width_cap[port] = port_cap->max_port_width;
dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
dev->caps.max_tc_eth = port_cap->max_tc_eth;
dev->caps.def_mac[port] = port_cap->def_mac;
dev->caps.supported_type[port] = port_cap->supported_port_types;
dev->caps.suggested_type[port] = port_cap->suggested_type;
dev->caps.default_sense[port] = port_cap->default_sense;
dev->caps.trans_type[port] = port_cap->trans_type;
dev->caps.vendor_oui[port] = port_cap->vendor_oui;
dev->caps.wavelength[port] = port_cap->wavelength;
dev->caps.trans_code[port] = port_cap->trans_code;
return 0;
}
static int mlx4_dev_port(struct mlx4_dev *dev, int port,
struct mlx4_port_cap *port_cap)
{
int err = 0;
err = mlx4_QUERY_PORT(dev, port, port_cap);
if (err)
mlx4_err(dev, "QUERY_PORT command failed.\n");
return err;
}
static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
return;
if (mlx4_is_mfunc(dev)) {
mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
return;
}
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
mlx4_dbg(dev,
"Keep FCS is not supported - Disabling Ignore FCS");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
return;
}
}
#define MLX4_A0_STEERING_TABLE_SIZE 256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
int i;
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
mlx4_dev_cap_dump(dev, dev_cap);
if (dev_cap->min_page_sz > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
dev_cap->min_page_sz, PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev_cap->num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
(unsigned long long)
pci_resource_len(dev->persist->pdev, 2));
return -ENODEV;
}
dev->caps.num_ports = dev_cap->num_ports;
dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
dev->caps.num_sys_eqs :
MLX4_MAX_EQ_NUM;
for (i = 1; i <= dev->caps.num_ports; ++i) {
err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
if (err) {
mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
return err;
}
}
dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
dev->caps.max_sq_sg = dev_cap->max_sq_sg;
dev->caps.max_rq_sg = dev_cap->max_rq_sg;
dev->caps.max_wqes = dev_cap->max_qp_sz;
dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE to enable resizing the CQ.
*/
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
dev->caps.flags2 = dev_cap->flags2;
dev->caps.bmme_flags = dev_cap->bmme_flags;
dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
dev->caps.wol_port[1] = dev_cap->wol_port[1];
dev->caps.wol_port[2] = dev_cap->wol_port[2];
dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;
/* Save uar page shift */
if (!mlx4_is_slave(dev)) {
/* Virtual PCI function needs to determine UAR page size from
* firmware. Only master PCI function can set the uar page size
*/
if (enable_4k_uar || !dev->persist->num_vfs)
dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
else
dev->uar_page_shift = PAGE_SHIFT;
mlx4_set_num_reserved_uars(dev, dev_cap);
}
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
struct mlx4_init_hca_param hca_param;
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
/* Turn off the PHV_EN flag in case phv_check_en is set.
* phv_check_en is a HW check that parses the packet and verifies that
* the phv bit was reported correctly in the WQE. To allow QinQ, the
* PHV_EN flag should be set and phv_check_en must be cleared,
* otherwise QinQ packets will be dropped by the HW.
*/
if (err || hca_param.phv_check_en)
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
}
/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
/* Don't do sense port on multifunction devices (for now at least) */
if (mlx4_is_mfunc(dev))
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
if (mlx4_low_memory_profile()) {
dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
} else {
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
}
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
if (dev->caps.supported_type[i]) {
/* if only ETH is supported - assign ETH */
if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
/* if only IB is supported, assign IB */
else if (dev->caps.supported_type[i] ==
MLX4_PORT_TYPE_IB)
dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
else {
/* if IB and ETH are supported, we set the port
* type according to user selection of port type;
* if user selected none, take the FW hint */
if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
else
dev->caps.port_type[i] = port_type_array[i - 1];
}
}
/*
* Link sensing is allowed on the port if 3 conditions are true:
* 1. Both protocols (IB and Ethernet) are supported on the port.
* 2. The HCA supports different port types (DPDP).
* 3. FW declared that it supports link sensing.
*/
mlx4_priv(dev)->sense.sense_allowed[i] =
((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
(dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
/*
* If the "default_sense" bit is set, move the port to "AUTO" mode
* and issue the SENSE_PORT FW command to try to set the correct
* port type from the start.
*/
if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
mlx4_SENSE_PORT(dev, i, &sensed_port);
if (sensed_port != MLX4_PORT_TYPE_NONE)
dev->caps.port_type[i] = sensed_port;
} else {
dev->caps.possible_type[i] = dev->caps.port_type[i];
}
if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
}
if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
(port_type_array[0] == MLX4_PORT_TYPE_IB) &&
(port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
mlx4_warn(dev,
"Granular QoS per VF not supported with IB/Eth configuration\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
}
dev->caps.max_counters = dev_cap->max_counters;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
(1 << dev->caps.log_num_macs) *
(1 << dev->caps.log_num_vlans) *
dev->caps.num_ports;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
else
dev->caps.dmfs_high_rate_qpn_base =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
} else {
dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
dev->caps.dmfs_high_rate_qpn_base =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
}
dev->caps.rl_caps = dev_cap->rl_caps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
dev->caps.dmfs_high_rate_qpn_range;
dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
if (dev_cap->flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
}
if (dev_cap->flags2 &
(MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
}
}
if ((dev->caps.flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
if (!mlx4_is_slave(dev)) {
mlx4_enable_cqe_eqe_stride(dev);
dev->caps.alloc_res_qp_mask =
(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
MLX4_RESERVE_A0_QP;
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
mlx4_warn(dev, "Old device ETS support detected\n");
mlx4_warn(dev, "Consider upgrading device FW.\n");
dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
}
} else {
dev->caps.alloc_res_qp_mask = 0;
}
mlx4_enable_ignore_fcs(dev);
return 0;
}
/* Check whether any VFs are still live and return how many there are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_state;
int i;
int ret = 0;
for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
if (s_state->active && s_state->last_cmd !=
MLX4_COMM_CMD_RESET) {
mlx4_warn(dev, "%s: slave: %d is still active\n",
__func__, i);
ret++;
}
}
return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
u32 qk = MLX4_RESERVED_QKEY_BASE;
if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
qpn < dev->phys_caps.base_proxy_sqpn)
return -EINVAL;
if (qpn >= dev->phys_caps.base_tunnel_sqpn)
/* tunnel qp */
qk += qpn - dev->phys_caps.base_tunnel_sqpn;
else
qk += qpn - dev->phys_caps.base_proxy_sqpn;
*qkey = qk;
return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
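/*
* Worked example for mlx4_get_parav_qkey() (hypothetical base, shown only
* for illustration): if phys_caps.base_tunnel_sqpn were 0x400, a tunnel QP
* with qpn 0x403 would get qkey MLX4_RESERVED_QKEY_BASE + 3. A typical
* caller simply does:
*
*	u32 qkey;
*
*	if (!mlx4_get_parav_qkey(dev, qpn, &qkey))
*		pr_debug("parav qkey for qpn 0x%x is 0x%x\n", qpn, qkey);
*/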
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
if (!mlx4_is_master(dev))
return;
priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
if (!mlx4_is_master(dev))
return;
priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);
if (!mlx4_is_master(dev))
return 0;
return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_slave;
if (!mlx4_is_master(dev))
return 0;
s_slave = &priv->mfunc.master.slave_state[slave];
return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header)
{
if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
struct mlx4_net_trans_rule_hw_eth *eth =
(struct mlx4_net_trans_rule_hw_eth *)eth_header;
struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
next_rule->rsvd == 0;
if (last_rule)
ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
}
}
EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *hca_param)
{
dev->caps.steering_mode = hca_param->steering_mode;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
dev->caps.fs_log_max_ucast_qp_range_size =
dev_cap->fs_log_max_ucast_qp_range_size;
} else
dev->caps.num_qp_per_mgm =
4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
mlx4_dbg(dev, "Steering mode is: %s\n",
mlx4_steering_mode_str(dev->caps.steering_mode));
}
static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
{
kfree(dev->caps.spec_qps);
dev->caps.spec_qps = NULL;
}
static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
{
struct mlx4_func_cap *func_cap;
struct mlx4_caps *caps = &dev->caps;
int i, err = 0;
func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
caps->spec_qps = kcalloc(caps->num_ports, sizeof(*caps->spec_qps), GFP_KERNEL);
if (!func_cap || !caps->spec_qps) {
mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
err = -ENOMEM;
goto err_mem;
}
for (i = 1; i <= caps->num_ports; ++i) {
err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
if (err) {
mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
i, err);
goto err_mem;
}
caps->spec_qps[i - 1] = func_cap->spec_qps;
caps->port_mask[i] = caps->port_type[i];
caps->phys_port_id[i] = func_cap->phys_port_id;
err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
&caps->gid_table_len[i],
&caps->pkey_table_len[i]);
if (err) {
mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
i, err);
goto err_mem;
}
}
err_mem:
if (err)
mlx4_slave_destroy_special_qp_cap(dev);
kfree(func_cap);
return err;
}
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
int err;
u32 page_size;
struct mlx4_dev_cap *dev_cap;
struct mlx4_func_cap *func_cap;
struct mlx4_init_hca_param *hca_param;
hca_param = kzalloc(sizeof(*hca_param), GFP_KERNEL);
func_cap = kzalloc(sizeof(*func_cap), GFP_KERNEL);
dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
if (!hca_param || !func_cap || !dev_cap) {
mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
err = -ENOMEM;
goto free_mem;
}
err = mlx4_QUERY_HCA(dev, hca_param);
if (err) {
mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
goto free_mem;
}
/* Fail if the HCA reports an unknown global capability;
* at this time global_caps should always be zero.
*/
if (hca_param->global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
err = -EINVAL;
goto free_mem;
}
dev->caps.hca_core_clock = hca_param->hca_core_clock;
dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
err = mlx4_dev_cap(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto free_mem;
}
err = mlx4_QUERY_FW(dev);
if (err)
mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
page_size, PAGE_SIZE);
err = -ENODEV;
goto free_mem;
}
/* Set uar_page_shift for VF */
dev->uar_page_shift = hca_param->uar_page_sz + 12;
/* Make sure the master uar page size is valid */
if (dev->uar_page_shift > PAGE_SHIFT) {
mlx4_err(dev,
"Invalid configuration: uar page size is larger than system page size\n");
err = -ENODEV;
goto free_mem;
}
/* Set reserved_uars based on the uar_page_shift */
mlx4_set_num_reserved_uars(dev, dev_cap);
/* Although the uar page size in FW differs from the system page size,
* upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
* still work under the assumption that uar page size == system page size.
*/
dev->caps.uar_page_size = PAGE_SIZE;
err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
if (err) {
mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
err);
goto free_mem;
}
if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
PF_CONTEXT_BEHAVIOUR_MASK) {
mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
func_cap->pf_context_behaviour,
PF_CONTEXT_BEHAVIOUR_MASK);
err = -EINVAL;
goto free_mem;
}
dev->caps.num_ports = func_cap->num_ports;
dev->quotas.qp = func_cap->qp_quota;
dev->quotas.srq = func_cap->srq_quota;
dev->quotas.cq = func_cap->cq_quota;
dev->quotas.mpt = func_cap->mpt_quota;
dev->quotas.mtt = func_cap->mtt_quota;
dev->caps.num_qps = 1 << hca_param->log_num_qps;
dev->caps.num_srqs = 1 << hca_param->log_num_srqs;
dev->caps.num_cqs = 1 << hca_param->log_num_cqs;
dev->caps.num_mpts = 1 << hca_param->log_mpt_sz;
dev->caps.num_eqs = func_cap->max_eq;
dev->caps.reserved_eqs = func_cap->reserved_eq;
dev->caps.reserved_lkey = func_cap->reserved_lkey;
dev->caps.num_pds = MLX4_NUM_PDS;
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev->caps.num_ports, MLX4_MAX_PORTS);
err = -ENODEV;
goto free_mem;
}
mlx4_replace_zero_macs(dev);
err = mlx4_slave_special_qp_cap(dev);
if (err) {
mlx4_err(dev, "Set special QP caps failed. aborting\n");
goto free_mem;
}
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->persist->pdev,
2)) {
mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long)
pci_resource_len(dev->persist->pdev, 2));
err = -ENOMEM;
goto err_mem;
}
if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
dev->caps.eqe_size = 64;
dev->caps.eqe_factor = 1;
} else {
dev->caps.eqe_size = 32;
dev->caps.eqe_factor = 0;
}
if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
dev->caps.cqe_size = 64;
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
} else {
dev->caps.cqe_size = 32;
}
if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
dev->caps.eqe_size = hca_param->eqe_size;
dev->caps.eqe_factor = 0;
}
if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
dev->caps.cqe_size = hca_param->cqe_size;
/* User space still needs to know when the CQE is larger than 32B */
dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
}
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");
slave_adjust_steering_mode(dev, dev_cap, hca_param);
mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
hca_param->rss_ip_frags ? "on" : "off");
if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
dev->caps.bf_reg_size)
dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;
if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;
err_mem:
if (err)
mlx4_slave_destroy_special_qp_cap(dev);
free_mem:
kfree(hca_param);
kfree(func_cap);
kfree(dev_cap);
return err;
}
/*
* Change the port configuration of the device.
* Every user of this function must hold the port mutex.
*/
int mlx4_change_port_types(struct mlx4_dev *dev,
enum mlx4_port_type *port_types)
{
int err = 0;
int change = 0;
int port;
for (port = 0; port < dev->caps.num_ports; port++) {
/* Change the port type only if the new type is different
* from the current, and not set to Auto */
if (port_types[port] != dev->caps.port_type[port + 1])
change = 1;
}
if (change) {
mlx4_unregister_device(dev);
for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port);
dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port, -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
goto out;
}
}
mlx4_set_port_mask(dev);
err = mlx4_register_device(dev);
if (err) {
mlx4_err(dev, "Failed to register device\n");
goto out;
}
}
out:
return err;
}
static ssize_t show_port_type(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
port_attr);
struct mlx4_dev *mdev = info->dev;
char type[8];
sprintf(type, "%s",
(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
"ib" : "eth");
if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
sprintf(buf, "auto (%s)\n", type);
else
sprintf(buf, "%s\n", type);
return strlen(buf);
}
static int __set_port_type(struct mlx4_port_info *info,
enum mlx4_port_type port_type)
{
struct mlx4_dev *mdev = info->dev;
struct mlx4_priv *priv = mlx4_priv(mdev);
enum mlx4_port_type types[MLX4_MAX_PORTS];
enum mlx4_port_type new_types[MLX4_MAX_PORTS];
int i;
int err = 0;
if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
mlx4_err(mdev,
"Requested port type for port %d is not supported on this HCA\n",
info->port);
return -EOPNOTSUPP;
}
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
/* Possible type is always the one that was delivered */
mdev->caps.possible_type[info->port] = info->tmp_type;
for (i = 0; i < mdev->caps.num_ports; i++) {
types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
mdev->caps.possible_type[i+1];
if (types[i] == MLX4_PORT_TYPE_AUTO)
types[i] = mdev->caps.port_type[i+1];
}
if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
err = -EOPNOTSUPP;
}
}
}
if (err) {
mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
goto out;
}
mlx4_do_sense_ports(mdev, new_types, types);
err = mlx4_check_port_params(mdev, new_types);
if (err)
goto out;
/* We are about to apply the changes now that the configuration
* has been verified, so there is no need to remember the
* temporary types any more */
for (i = 0; i < mdev->caps.num_ports; i++)
priv->port[i + 1].tmp_type = 0;
err = mlx4_change_port_types(mdev, new_types);
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
return err;
}
static ssize_t set_port_type(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
port_attr);
struct mlx4_dev *mdev = info->dev;
enum mlx4_port_type port_type;
static DEFINE_MUTEX(set_port_type_mutex);
int err;
mutex_lock(&set_port_type_mutex);
if (!strcmp(buf, "ib\n")) {
port_type = MLX4_PORT_TYPE_IB;
} else if (!strcmp(buf, "eth\n")) {
port_type = MLX4_PORT_TYPE_ETH;
} else if (!strcmp(buf, "auto\n")) {
port_type = MLX4_PORT_TYPE_AUTO;
} else {
mlx4_err(mdev, "%s is not supported port type\n", buf);
err = -EINVAL;
goto err_out;
}
err = __set_port_type(info, port_type);
err_out:
mutex_unlock(&set_port_type_mutex);
return err ? err : count;
}
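/*
* Usage note (illustrative; the exact attribute name and sysfs path depend
* on how the per-port attribute is registered against the HCA's PCI
* device, typically as mlx4_port<N>): writing "ib", "eth" or "auto" to the
* attribute ends up in set_port_type() above, e.g.
*
*	echo eth > /sys/bus/pci/devices/<BDF>/mlx4_port1
*/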
enum ibta_mtu {
IB_MTU_256 = 1,
IB_MTU_512 = 2,
IB_MTU_1024 = 3,
IB_MTU_2048 = 4,
IB_MTU_4096 = 5
};
static inline int int_to_ibta_mtu(int mtu)
{
switch (mtu) {
case 256: return IB_MTU_256;
case 512: return IB_MTU_512;
case 1024: return IB_MTU_1024;
case 2048: return IB_MTU_2048;
case 4096: return IB_MTU_4096;
default: return -1;
}
}
static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
switch (mtu) {
case IB_MTU_256: return 256;
case IB_MTU_512: return 512;
case IB_MTU_1024: return 1024;
case IB_MTU_2048: return 2048;
case IB_MTU_4096: return 4096;
default: return -1;
}
}
static ssize_t show_port_ib_mtu(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
port_mtu_attr);
struct mlx4_dev *mdev = info->dev;
if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
sprintf(buf, "%d\n",
ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
return strlen(buf);
}
static ssize_t set_port_ib_mtu(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
port_mtu_attr);
struct mlx4_dev *mdev = info->dev;
struct mlx4_priv *priv = mlx4_priv(mdev);
int err, port, mtu, ibta_mtu = -1;
if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
return -EINVAL;
}
err = kstrtoint(buf, 0, &mtu);
if (!err)
ibta_mtu = int_to_ibta_mtu(mtu);
if (err || ibta_mtu < 0) {
mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
return -EINVAL;
}
mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
mlx4_unregister_device(mdev);
for (port = 1; port <= mdev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
mlx4_err(mdev, "Failed to set port %d, aborting\n",
port);
goto err_set_port;
}
}
err = mlx4_register_device(mdev);
err_set_port:
mutex_unlock(&priv->port_mutex);
mlx4_start_sense(mdev);
return err ? err : count;
}
/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
int err = 0;
int nvfs;
struct mlx4_slaves_pport slaves_port1;
struct mlx4_slaves_pport slaves_port2;
slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
/* only single port vfs are allowed */
if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves,
dev->persist->num_vfs + 1) > 1) {
mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
return -EINVAL;
}
/* The number of virtual functions is the total number of functions
* minus one physical function for each port.
*/
nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
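/* Illustrative example (hypothetical configuration): with two VFs probed
* on each port, each per-port slave bitmap has three bits set (the PF plus
* two VFs), so nvfs = 3 + 3 - 2 = 4.
*/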
/* limit on maximum allowed VFs */
if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
return -EINVAL;
}
if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
return -EINVAL;
}
err = mlx4_bond_mac_table(dev);
if (err)
return err;
err = mlx4_bond_vlan_table(dev);
if (err)
goto err1;
err = mlx4_bond_fs_rules(dev);
if (err)
goto err2;
return 0;
err2:
(void)mlx4_unbond_vlan_table(dev);
err1:
(void)mlx4_unbond_mac_table(dev);
return err;
}
static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
int ret, ret1;
ret = mlx4_unbond_fs_rules(dev);
if (ret)
mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
ret1 = mlx4_unbond_mac_table(dev);
if (ret1) {
mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
ret = ret1;
}
ret1 = mlx4_unbond_vlan_table(dev);
if (ret1) {
mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
ret = ret1;
}
return ret;
}
static int mlx4_bond(struct mlx4_dev *dev)
{
int ret = 0;
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->bond_mutex);
if (!mlx4_is_bonded(dev)) {
ret = mlx4_do_bond(dev, true);
if (ret)
mlx4_err(dev, "Failed to bond device: %d\n", ret);
if (!ret && mlx4_is_master(dev)) {
ret = mlx4_mf_bond(dev);
if (ret) {
mlx4_err(dev, "bond for multifunction failed\n");
mlx4_do_bond(dev, false);
}
}
}
mutex_unlock(&priv->bond_mutex);
if (!ret)
mlx4_dbg(dev, "Device is bonded\n");
return ret;
}
static int mlx4_unbond(struct mlx4_dev *dev)
{
int ret = 0;
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->bond_mutex);
if (mlx4_is_bonded(dev)) {
int ret2 = 0;
ret = mlx4_do_bond(dev, false);
if (ret)
mlx4_err(dev, "Failed to unbond device: %d\n", ret);
if (mlx4_is_master(dev))
ret2 = mlx4_mf_unbond(dev);
if (ret2) {
mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
ret = ret2;
}
}
mutex_unlock(&priv->bond_mutex);
if (!ret)
mlx4_dbg(dev, "Device is unbonded\n");
return ret;
}
static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
u8 port1 = v2p->port1;
u8 port2 = v2p->port2;
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
return -EOPNOTSUPP;
mutex_lock(&priv->bond_mutex);
/* zero means keep current mapping for this port */
if (port1 == 0)
port1 = priv->v2p.port1;
if (port2 == 0)
port2 = priv->v2p.port2;
if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
(port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
(port1 == 2 && port2 == 1)) {
/* Besides the boundary checks, cross mapping makes
* no sense and is therefore not allowed */
err = -EINVAL;
} else if ((port1 == priv->v2p.port1) &&
(port2 == priv->v2p.port2)) {
err = 0;
} else {
err = mlx4_virt2phy_port_map(dev, port1, port2);
if (!err) {
mlx4_dbg(dev, "port map changed: [%d][%d]\n",
port1, port2);
priv->v2p.port1 = port1;
priv->v2p.port2 = port2;
} else {
mlx4_err(dev, "Failed to change port mape: %d\n", err);
}
}
mutex_unlock(&priv->bond_mutex);
return err;
}
struct mlx4_bond {
struct work_struct work;
struct mlx4_dev *dev;
int is_bonded;
struct mlx4_port_map port_map;
};
static void mlx4_bond_work(struct work_struct *work)
{
struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
int err = 0;
if (bond->is_bonded) {
if (!mlx4_is_bonded(bond->dev)) {
err = mlx4_bond(bond->dev);
if (err)
mlx4_err(bond->dev, "Fail to bond device\n");
}
if (!err) {
err = mlx4_port_map_set(bond->dev, &bond->port_map);
if (err)
mlx4_err(bond->dev,
"Fail to set port map [%d][%d]: %d\n",
bond->port_map.port1,
bond->port_map.port2, err);
}
} else if (mlx4_is_bonded(bond->dev)) {
err = mlx4_unbond(bond->dev);
if (err)
mlx4_err(bond->dev, "Fail to unbond device\n");
}
put_device(&bond->dev->persist->pdev->dev);
kfree(bond);
}
int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
u8 v2p_p2)
{
struct mlx4_bond *bond;
bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
if (!bond)
return -ENOMEM;
INIT_WORK(&bond->work, mlx4_bond_work);
get_device(&dev->persist->pdev->dev);
bond->dev = dev;
bond->is_bonded = is_bonded;
bond->port_map.port1 = v2p_p1;
bond->port_map.port2 = v2p_p2;
queue_work(mlx4_wq, &bond->work);
return 0;
}
EXPORT_SYMBOL(mlx4_queue_bond_work);
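/*
* Usage sketch for mlx4_queue_bond_work() (hedged, illustrative caller --
* the values shown are not mandated by this driver): a netdev event
* handler may request bonding from atomic context while keeping the
* current virtual-to-physical port mapping (a v2p value of 0 means "keep",
* see mlx4_port_map_set() above):
*
*	if (mlx4_queue_bond_work(dev, 1, 0, 0))
*		pr_warn("failed to queue mlx4 bond work\n");
*
* Passing is_bonded = 0 queues an unbond request instead.
*/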
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
if (err) {
mlx4_err(dev, "MAP_FA command failed, aborting\n");
goto err_free;
}
err = mlx4_RUN_FW(dev);
if (err) {
mlx4_err(dev, "RUN_FW command failed, aborting\n");
goto err_unmap_fa;
}
return 0;
err_unmap_fa:
mlx4_UNMAP_FA(dev);
err_free:
mlx4_free_icm(dev, priv->fw.fw_icm, 0);
return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
int cmpt_entry_sz)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int num_eqs;
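/* The consolidated MPT (cMPT) table is split into fixed-size regions, one
* per context type (QP, SRQ, CQ, EQ). Each region starts at
* cmpt_base + ((type * cmpt_entry_sz) << MLX4_CMPT_SHIFT), so every type
* gets room for up to 1 << MLX4_CMPT_SHIFT entries regardless of how many
* are actually mapped below.
*/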
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_QP *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_qps,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err)
goto err;
err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_SRQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err)
goto err_qp;
err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_CQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err)
goto err_srq;
num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
return 0;
err_cq:
mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
err_srq:
mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
err_qp:
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err:
return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
if (err) {
mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
return err;
}
mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
(unsigned long long) icm_size >> 10,
(unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
if (err) {
mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
goto err_free_aux;
}
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
if (err) {
mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
goto err_unmap_aux;
}
num_eqs = dev->phys_caps.num_phys_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
goto err_unmap_cmpt;
}
/*
* Reserved MTT entries must be aligned up to a cacheline
* boundary, since the FW will write to them, while the driver
* writes to all other MTT entries. (The variable
* dev->caps.mtt_entry_sz below is really the MTT segment
* size, not the raw entry size)
*/
dev->caps.reserved_mtts =
ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
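/* Illustrative example (hypothetical values): with a 64-byte MTT segment
* size, 100 reserved entries and a 256-byte cache alignment,
* ALIGN(100 * 64, 256) / 64 = 100 (already aligned), while 101 reserved
* entries would round up to 104.
*/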
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
goto err_unmap_eq;
}
err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
init_hca->dmpt_base,
dev_cap->dmpt_entry_sz,
dev->caps.num_mpts,
dev->caps.reserved_mrws, 1, 1);
if (err) {
mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
goto err_unmap_mtt;
}
err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
init_hca->qpc_base,
dev_cap->qpc_entry_sz,
dev->caps.num_qps,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
mlx4_err(dev, "Failed to map QP context memory, aborting\n");
goto err_unmap_dmpt;
}
err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
init_hca->auxc_base,
dev_cap->aux_entry_sz,
dev->caps.num_qps,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
goto err_unmap_qp;
}
err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
init_hca->altc_base,
dev_cap->altc_entry_sz,
dev->caps.num_qps,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
goto err_unmap_auxc;
}
err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
init_hca->rdmarc_base,
dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
dev->caps.num_qps,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
goto err_unmap_altc;
}
err = mlx4_init_icm_table(dev, &priv->cq_table.table,
init_hca->cqc_base,
dev_cap->cqc_entry_sz,
dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
goto err_unmap_rdmarc;
}
err = mlx4_init_icm_table(dev, &priv->srq_table.table,
init_hca->srqc_base,
dev_cap->srq_entry_sz,
dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
goto err_unmap_cq;
}
/*
* For flow steering device managed mode it is required to use
* mlx4_init_icm_table. For B0 steering mode it's not strictly
* required, but for simplicity just map the whole multicast
* group table now. The table isn't very big and it's a lot
* easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
init_hca->mc_base,
mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
if (err) {
mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
goto err_unmap_srq;
}
return 0;
err_unmap_srq:
mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
err_unmap_cq:
mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
err_unmap_rdmarc:
mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
err_unmap_altc:
mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
err_unmap_auxc:
mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
err_unmap_qp:
mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
err_unmap_dmpt:
mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
err_unmap_mtt:
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
err_unmap_eq:
mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
err_unmap_cmpt:
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err_unmap_aux:
mlx4_UNMAP_ICM_AUX(dev);
err_free_aux:
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
mlx4_UNMAP_ICM_AUX(dev);
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_slave_exit(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
MLX4_COMM_TIME))
mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
resource_size_t bf_start;
resource_size_t bf_len;
int err = 0;
if (!dev->caps.bf_reg_size)
return -ENXIO;
bf_start = pci_resource_start(dev->persist->pdev, 2) +
(dev->caps.num_uars << PAGE_SHIFT);
bf_len = pci_resource_len(dev->persist->pdev, 2) -
(dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
return err;
}
static void unmap_bf_area(struct mlx4_dev *dev)
{
if (mlx4_priv(dev)->bf_mapping)
io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
u64 mlx4_read_clock(struct mlx4_dev *dev)
{
u32 clockhi, clocklo, clockhi1;
u64 cycles;
int i;
struct mlx4_priv *priv = mlx4_priv(dev);
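/* Read the high 32 bits, then the low 32 bits, then the high bits again;
* if the two high-word reads match, no rollover happened in between and
* the combined 64-bit value is consistent. Otherwise retry (up to 10
* times).
*/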
for (i = 0; i < 10; i++) {
clockhi = swab32(readl(priv->clock_mapping));
clocklo = swab32(readl(priv->clock_mapping + 4));
clockhi1 = swab32(readl(priv->clock_mapping));
if (clockhi == clockhi1)
break;
}
cycles = (u64) clockhi << 32 | (u64) clocklo;
return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
static int map_internal_clock(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->clock_mapping =
ioremap(pci_resource_start(dev->persist->pdev,
priv->fw.clock_bar) +
priv->fw.clock_offset, MLX4_CLOCK_SIZE);
if (!priv->clock_mapping)
return -ENOMEM;
return 0;
}
int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
struct mlx4_clock_params *params)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (mlx4_is_slave(dev))
return -EOPNOTSUPP;
if (!dev->caps.map_clock_to_user) {
mlx4_dbg(dev, "Map clock to user is not supported.\n");
return -EOPNOTSUPP;
}
if (!params)
return -EINVAL;
params->bar = priv->fw.clock_bar;
params->offset = priv->fw.clock_offset;
params->size = MLX4_CLOCK_SIZE;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
static void unmap_internal_clock(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (priv->clock_mapping)
iounmap(priv->clock_mapping);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_internal_clock(dev);
unmap_bf_area(dev);
if (mlx4_is_slave(dev))
mlx4_slave_exit(dev);
else {
mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev);
}
}
static void mlx4_close_fw(struct mlx4_dev *dev)
{
if (!mlx4_is_slave(dev)) {
mlx4_UNMAP_FA(dev);
mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
}
static int mlx4_comm_check_offline(struct mlx4_dev *dev)
{
#define COMM_CHAN_OFFLINE_OFFSET 0x09
u32 comm_flags;
u32 offline_bit;
unsigned long end;
struct mlx4_priv *priv = mlx4_priv(dev);
end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
while (time_before(jiffies, end)) {
comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
MLX4_COMM_CHAN_FLAGS));
offline_bit = (comm_flags &
(u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
if (!offline_bit)
return 0;
/* If device removal has been requested,
* do not continue retrying.
*/
if (dev->persist->interface_state &
MLX4_INTERFACE_STATE_NOWAIT)
break;
/* There are cases as part of AER/Reset flow that PF needs
* around 100 msec to load. We therefore sleep for 100 msec
* to allow other tasks to make use of that CPU during this
* time interval.
*/
msleep(100);
}
mlx4_err(dev, "Communication channel is offline.\n");
return -EIO;
}
static void mlx4_reset_vf_support(struct mlx4_dev *dev)
{
#define COMM_CHAN_RST_OFFSET 0x1e
struct mlx4_priv *priv = mlx4_priv(dev);
u32 comm_rst;
u32 comm_caps;
comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
MLX4_COMM_CHAN_CAPS));
comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
if (comm_rst)
dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
}
static int mlx4_init_slave(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 dma = (u64) priv->mfunc.vhcr_dma;
int ret_from_reset = 0;
u32 slave_read;
u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) {
mlx4_warn(dev, "PF is not ready - Deferring probe\n");
return -EPROBE_DEFER;
}
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
if (mlx4_comm_check_offline(dev)) {
mlx4_err(dev, "PF is not responsive, skipping initialization\n");
goto err_offline;
}
mlx4_reset_vf_support(dev);
mlx4_warn(dev, "Sending reset\n");
ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
/* If we are in the middle of an FLR, the slave will retry
* NUM_OF_RESET_RETRIES times before giving up. */
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EPROBE_DEFER;
} else
goto err;
}
/* check the driver version - the slave I/F revision
* must match the master's */
slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
cmd_channel_ver = mlx4_comm_get_version();
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
MLX4_COMM_GET_IF_REV(slave_read)) {
mlx4_err(dev, "slave driver version is not supported by the master\n");
goto err;
}
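/* Hand the 64-bit VHCR DMA address to the PF in 16-bit chunks over the
* comm channel: VHCR0 carries bits 63:48, VHCR1 bits 47:32, VHCR2 bits
* 31:16, and VHCR_EN passes the low bits and enables the VHCR.
*/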
mlx4_warn(dev, "Sending vhcr0\n");
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
goto err;
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return 0;
err:
mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
err_offline:
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EIO;
}
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
dev->caps.gid_table_len[i] =
mlx4_get_slave_num_gids(dev, 0, i);
else
dev->caps.gid_table_len[i] = 1;
dev->caps.pkey_table_len[i] =
dev->phys_caps.pkey_phys_table_len[i] - 1;
}
}
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;
for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
i++) {
if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
break;
}
return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
switch (dmfs_high_steer_mode) {
case MLX4_STEERING_DMFS_A0_DEFAULT:
return "default performance";
case MLX4_STEERING_DMFS_A0_DYNAMIC:
return "dynamic hybrid mode";
case MLX4_STEERING_DMFS_A0_STATIC:
return "performance optimized for limited rule configuration (static)";
case MLX4_STEERING_DMFS_A0_DISABLE:
return "disabled performance optimized steering";
case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
return "performance optimized steering not supported";
default:
return "Unrecognized mode";
}
}
#define MLX4_DMFS_A0_STEERING (1UL << 2)
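/*
* A non-positive log_num_mgm_entry_size module parameter requests
* device-managed flow steering (when the device supports it), and bits of
* its magnitude act as modifier flags. For example (illustrative), loading
* with log_num_mgm_entry_size = -5 sets bit 2 of the magnitude
* ((-(-5)) & MLX4_DMFS_A0_STEERING), which asks choose_steering_mode()
* below for the DMFS A0 static optimization.
*/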
static void choose_steering_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (mlx4_log_num_mgm_entry_size <= 0) {
if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
if (dev->caps.dmfs_high_steer_mode ==
MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
mlx4_err(dev, "DMFS high rate mode not supported\n");
else
dev->caps.dmfs_high_steer_mode =
MLX4_STEERING_DMFS_A0_STATIC;
}
}
if (mlx4_log_num_mgm_entry_size <= 0 &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
(!mlx4_is_mfunc(dev) ||
(dev_cap->fs_max_num_qp_per_entry >=
(dev->persist->num_vfs + 1))) &&
choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
dev->oper_log_mgm_entry_size =
choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
dev->caps.fs_log_max_ucast_qp_range_size =
dev_cap->fs_log_max_ucast_qp_range_size;
} else {
if (dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
else {
dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
}
dev->oper_log_mgm_entry_size =
mlx4_log_num_mgm_entry_size > 0 ?
mlx4_log_num_mgm_entry_size :
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
}
mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
mlx4_steering_mode_str(dev->caps.steering_mode),
dev->oper_log_mgm_entry_size,
mlx4_log_num_mgm_entry_size);
}
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
{
int i;
struct mlx4_port_cap port_cap;
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
return -EINVAL;
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_dev_port(dev, i, &port_cap)) {
mlx4_err(dev,
"QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
} else if ((dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_DEFAULT) &&
(port_cap.dmfs_optimized_state ==
!!(dev->caps.dmfs_high_steer_mode ==
MLX4_STEERING_DMFS_A0_DISABLE))) {
mlx4_err(dev,
"DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
dmfs_high_rate_steering_mode_str(
dev->caps.dmfs_high_steer_mode),
(port_cap.dmfs_optimized_state ?
"enabled" : "disabled"));
}
}
return 0;
}
static int mlx4_init_fw(struct mlx4_dev *dev)
{
struct mlx4_mod_stat_cfg mlx4_cfg;
int err = 0;
if (!mlx4_is_slave(dev)) {
err = mlx4_QUERY_FW(dev);
if (err) {
if (err == -EACCES)
mlx4_info(dev, "non-primary physical function, skipping\n");
else
mlx4_err(dev, "QUERY_FW command failed, aborting\n");
return err;
}
err = mlx4_load_fw(dev);
if (err) {
mlx4_err(dev, "Failed to start FW, aborting\n");
return err;
}
mlx4_cfg.log_pg_sz_m = 1;
mlx4_cfg.log_pg_sz = 0;
err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
if (err)
mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
}
return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_init_hca_param *init_hca = NULL;
struct mlx4_dev_cap *dev_cap = NULL;
struct mlx4_adapter adapter;
struct mlx4_profile profile;
u64 icm_size;
struct mlx4_config_dev_params params;
int err;
if (!mlx4_is_slave(dev)) {
dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
if (!dev_cap || !init_hca) {
err = -ENOMEM;
goto out_free;
}
err = mlx4_dev_cap(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto out_free;
}
choose_steering_mode(dev, dev_cap);
choose_tunnel_offload_mode(dev, dev_cap);
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
mlx4_is_master(dev))
dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
err = mlx4_get_phys_port_id(dev);
if (err)
mlx4_err(dev, "Fail to get physical port id\n");
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
if (mlx4_low_memory_profile()) {
mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
profile = low_mem_profile;
} else {
profile = default_profile;
}
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
profile.num_mcg = MLX4_FS_NUM_MCG;
icm_size = mlx4_make_profile(dev, &profile, dev_cap,
init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
goto out_free;
}
if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
} else {
init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
init_hca->uar_page_sz = PAGE_SHIFT - 12;
}
init_hca->mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
if (err)
goto out_free;
err = mlx4_INIT_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
err = mlx4_query_func(dev, dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
dev->caps.num_eqs = dev_cap->max_eqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
dev->caps.reserved_uars = dev_cap->reserved_uars;
}
}
/*
* If timestamping is supported by the FW,
* read the HCA frequency via the QUERY_HCA command.
*/
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
err = mlx4_QUERY_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
init_hca->hca_core_clock;
}
/* In case we got HCA frequency 0 - disable timestamping
* to avoid dividing by zero
*/
if (!dev->caps.hca_core_clock) {
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev,
"HCA frequency is 0 - timestamping is not supported\n");
} else if (map_internal_clock(dev)) {
/*
* Map internal clock,
* in case of failure disable timestamping
*/
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
if (dev->caps.dmfs_high_steer_mode !=
MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
if (mlx4_validate_optimized_steering(dev))
mlx4_warn(dev, "Optimized steering validation failed\n");
if (dev->caps.dmfs_high_steer_mode ==
MLX4_STEERING_DMFS_A0_DISABLE) {
dev->caps.dmfs_high_rate_qpn_base =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
dev->caps.dmfs_high_rate_qpn_range =
MLX4_A0_STEERING_TABLE_SIZE;
}
mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
dmfs_high_rate_steering_mode_str(
dev->caps.dmfs_high_steer_mode));
}
} else {
err = mlx4_init_slave(dev);
if (err) {
if (err != -EPROBE_DEFER)
mlx4_err(dev, "Failed to initialize slave\n");
return err;
}
err = mlx4_slave_cap(dev);
if (err) {
mlx4_err(dev, "Failed to obtain slave caps\n");
goto err_close;
}
}
if (map_bf_area(dev))
mlx4_dbg(dev, "Failed to map blue flame area\n");
/* Only the master sets the ports; all the others get them from it. */
if (!mlx4_is_slave(dev))
mlx4_set_port_mask(dev);
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
goto unmap_bf;
}
/* Query CONFIG_DEV parameters */
err = mlx4_config_dev_retrieval(dev, ¶ms);
if (err && err != -EOPNOTSUPP) {
mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
} else if (!err) {
dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
}
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
err = 0;
goto out_free;
unmap_bf:
unmap_internal_clock(dev);
unmap_bf_area(dev);
if (mlx4_is_slave(dev))
mlx4_slave_destroy_special_qp_cap(dev);
err_close:
if (mlx4_is_slave(dev))
mlx4_slave_exit(dev);
else
mlx4_CLOSE_HCA(dev, 0);
err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
out_free:
kfree(dev_cap);
kfree(init_hca);
return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int nent_pow2;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
if (!dev->caps.max_counters)
return -ENOSPC;
nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
/* reserve last counter index for sink counter */
return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
nent_pow2 - 1, 0,
nent_pow2 - dev->caps.max_counters + 1);
}
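/*
* Illustrative example (hypothetical capability): with max_counters = 127,
* nent_pow2 = 128 and the reserved-top count is 128 - 127 + 1 = 2, so
* indices 0..125 are allocatable while the sink counter (the last valid
* index, 126) and the padding index 127 stay reserved.
*/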
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return;
if (!dev->caps.max_counters)
return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port;
for (port = 0; port < dev->caps.num_ports; port++)
if (priv->def_counter[port] != -1)
mlx4_counter_free(dev, priv->def_counter[port]);
}
static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int port, err = 0;
u32 idx;
for (port = 0; port < dev->caps.num_ports; port++)
priv->def_counter[port] = -1;
for (port = 0; port < dev->caps.num_ports; port++) {
err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
if (!err || err == -ENOSPC) {
priv->def_counter[port] = idx;
err = 0;
} else if (err == -ENOENT) {
err = 0;
continue;
} else if (mlx4_is_slave(dev) && err == -EINVAL) {
priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
MLX4_SINK_COUNTER_INDEX(dev));
err = 0;
} else {
mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
__func__, port + 1, err);
mlx4_cleanup_default_counters(dev);
return err;
}
mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
__func__, priv->def_counter[port], port + 1);
}
return err;
}
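/* Native counter allocation (PF / non-wrapped path): grab a free index from
 * the counters bitmap; if it is exhausted, hand back the per-device sink
 * counter index and report -ENOSPC.
 */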
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return -ENOENT;
*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
if (*idx == -1) {
*idx = MLX4_SINK_COUNTER_INDEX(dev);
return -ENOSPC;
}
return 0;
}
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
{
u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*idx = get_param_l(&out_param);
if (WARN_ON(err == -ENOSPC))
err = -EINVAL;
return err;
}
return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
u8 counter_index)
{
struct mlx4_cmd_mailbox *if_stat_mailbox;
int err;
u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(if_stat_mailbox))
return PTR_ERR(if_stat_mailbox);
err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
return err;
}
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
return;
if (idx == MLX4_SINK_COUNTER_INDEX(dev))
return;
__mlx4_clear_if_stat(dev, idx);
mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
return;
}
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, idx);
mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
return;
}
__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return priv->def_counter[port - 1];
}
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return priv->mfunc.master.vf_admin[entry].vport[port].guid;
}
EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
struct mlx4_priv *priv = mlx4_priv(dev);
__be64 guid;
/* hw GUID */
if (entry == 0)
return;
get_random_bytes((char *)&guid, sizeof(guid));
guid &= ~(cpu_to_be64(1ULL << 56));
guid |= cpu_to_be64(1ULL << 57);
priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
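/* Bring up the HCA resource tables (UAR, PD, XRCD, MR, EQ, CQ, SRQ, QP,
 * counters) and the per-port IB capabilities/MTU in dependency order. Any
 * failure unwinds the already-initialized tables in reverse order through
 * the error labels below.
 */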
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
int port;
__be32 ib_port_default_caps;
err = mlx4_init_uar_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
if (err) {
mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
goto err_uar_table_free;
}
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
err = -ENOMEM;
goto err_uar_free;
}
err = mlx4_init_pd_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
goto err_kar_unmap;
}
err = mlx4_init_xrcd_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
goto err_pd_table_free;
}
err = mlx4_init_mr_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
goto err_xrcd_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_mcg_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
err = mlx4_config_mad_demux(dev);
if (err) {
mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
goto err_mcg_table_free;
}
}
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
if (err) {
mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
goto err_eq_table_free;
}
err = mlx4_NOP(dev);
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
goto err_cmd_poll;
}
mlx4_dbg(dev, "NOP command IRQ test passed\n");
err = mlx4_init_cq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
goto err_cmd_poll;
}
err = mlx4_init_srq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
goto err_cq_table_free;
}
err = mlx4_init_qp_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
goto err_srq_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
}
err = mlx4_allocate_default_counters(dev);
if (err) {
mlx4_err(dev, "Failed to allocate default counters, aborting\n");
goto err_counters_table_free;
}
if (!mlx4_is_slave(dev)) {
for (port = 1; port <= dev->caps.num_ports; port++) {
ib_port_default_caps = 0;
err = mlx4_get_port_ib_caps(dev, port,
&ib_port_default_caps);
if (err)
mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
if (mlx4_is_master(dev)) {
int i;
for (i = 0; i < dev->num_slaves; i++) {
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
ib_port_default_caps;
}
}
if (mlx4_is_mfunc(dev))
dev->caps.port_ib_mtu[port] = IB_MTU_2048;
else
dev->caps.port_ib_mtu[port] = IB_MTU_4096;
err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
goto err_default_counters_free;
}
}
}
return 0;
err_default_counters_free:
mlx4_cleanup_default_counters(dev);
err_counters_table_free:
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
err_srq_table_free:
mlx4_cleanup_srq_table(dev);
err_cq_table_free:
mlx4_cleanup_cq_table(dev);
err_cmd_poll:
mlx4_cmd_use_polling(dev);
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
err_mcg_table_free:
if (!mlx4_is_slave(dev))
mlx4_cleanup_mcg_table(dev);
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
err_xrcd_table_free:
mlx4_cleanup_xrcd_table(dev);
err_pd_table_free:
mlx4_cleanup_pd_table(dev);
err_kar_unmap:
iounmap(priv->kar);
err_uar_free:
mlx4_uar_free(dev, &priv->driver_uar);
err_uar_table_free:
mlx4_cleanup_uar_table(dev);
return err;
}
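/* Pick the CPU that should service the given completion EQ for this port and
 * record it in the EQ's affinity mask. The asynchronous EQ (MLX4_EQ_ASYNC)
 * is skipped when translating the EQ number into a CPU index; a negative
 * result means the EQs are shared between ports (the call came from the
 * second port), so nothing is set.
 */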
static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
{
int requested_cpu = 0;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eq *eq;
int off = 0;
int i;
if (eqn > dev->caps.num_comp_vectors)
return -EINVAL;
for (i = 1; i < port; i++)
off += mlx4_get_eqs_per_port(dev, i);
requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
/* Meaning EQs are shared, and this call comes from the second port */
if (requested_cpu < 0)
return 0;
eq = &priv->eq_table.eq[eqn];
if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(requested_cpu, eq->affinity_mask);
return 0;
}
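/* Try to enable MSI-X: request up to one completion vector per online CPU per
 * port plus one asynchronous vector, capped by the EQs the firmware exposes,
 * by MAX_MSIX and by the msi_x module parameter, then spread the completion
 * EQs across the ports. On failure, fall back to a single shared INTx vector.
 */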
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
int port = 0;
if (msi_x) {
int nreq = min3(dev->caps.num_ports *
(int)num_online_cpus() + 1,
dev->caps.num_eqs - dev->caps.reserved_eqs,
MAX_MSIX);
if (msi_x > 1)
nreq = min_t(int, nreq, msi_x);
entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
if (!entries)
goto no_msi;
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
}
/* 1 is reserved for events (asynchronous EQ) */
dev->caps.num_comp_vectors = nreq - 1;
priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
dev->caps.num_ports);
for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
if (i == MLX4_EQ_ASYNC)
continue;
priv->eq_table.eq[i].irq =
entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
dev->caps.num_ports);
/* We don't set affinity hint when there
* aren't enough EQs
*/
} else {
set_bit(port,
priv->eq_table.eq[i].actv_ports.ports);
if (mlx4_init_affinity_hint(dev, port + 1, i))
mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
i);
}
/* We divide the EQs evenly between the two ports.
 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
 * is the number of EQs per port (i.e. eqs_per_port).
 * Ideally we would write something like
 * (i + 1) % eqs_per_port == 0. However, since there is
 * an asynchronous EQ, we have to skip over it by
 * comparing this condition to !!((i + 1) > MLX4_EQ_ASYNC).
 */
if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
((i + 1) %
(dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
!!((i + 1) > MLX4_EQ_ASYNC))
/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
* everything is shared anyway.
*/
port++;
}
dev->flags |= MLX4_FLAG_MSI_X;
kfree(entries);
return;
}
no_msi:
dev->caps.num_comp_vectors = 1;
BUG_ON(MLX4_EQ_ASYNC >= 2);
for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
if (i != MLX4_EQ_ASYNC) {
bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
dev->caps.num_ports);
}
}
}
static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
enum devlink_port_type port_type)
{
struct mlx4_port_info *info = container_of(devlink_port,
struct mlx4_port_info,
devlink_port);
enum mlx4_port_type mlx4_port_type;
switch (port_type) {
case DEVLINK_PORT_TYPE_AUTO:
mlx4_port_type = MLX4_PORT_TYPE_AUTO;
break;
case DEVLINK_PORT_TYPE_ETH:
mlx4_port_type = MLX4_PORT_TYPE_ETH;
break;
case DEVLINK_PORT_TYPE_IB:
mlx4_port_type = MLX4_PORT_TYPE_IB;
break;
default:
return -EOPNOTSUPP;
}
return __set_port_type(info, mlx4_port_type);
}
static const struct devlink_port_ops mlx4_devlink_port_ops = {
.port_type_set = mlx4_devlink_port_type_set,
};
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
err = devl_port_register_with_ops(devlink, &info->devlink_port, port,
&mlx4_devlink_port_ops);
if (err)
return err;
/* Ethernet and IB drivers will normally set the port type,
 * but if they are not built, set the type now to prevent
 * devlink_port_type_warn() from firing.
 */
if (!IS_ENABLED(CONFIG_MLX4_EN) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
devlink_port_type_eth_set(&info->devlink_port);
else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(&info->devlink_port, NULL);
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
mlx4_init_roce_gid_table(dev, &info->gid_table);
info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
if (mlx4_is_mfunc(dev)) {
info->port_attr.attr.mode = 0444;
} else {
info->port_attr.attr.mode = 0644;
info->port_attr.store = set_port_type;
}
info->port_attr.show = show_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
info->port_mtu_attr.attr.name = info->dev_mtu_name;
if (mlx4_is_mfunc(dev)) {
info->port_mtu_attr.attr.mode = 0444;
} else {
info->port_mtu_attr.attr.mode = 0644;
info->port_mtu_attr.store = set_port_ib_mtu;
}
info->port_mtu_attr.show = show_port_ib_mtu;
sysfs_attr_init(&info->port_mtu_attr.attr);
err = device_create_file(&dev->persist->pdev->dev,
&info->port_mtu_attr);
if (err) {
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
return 0;
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
if (info->port < 0)
return;
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
devlink_port_type_clear(&info->devlink_port);
devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int num_entries = dev->caps.num_ports;
int i, j;
priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
GFP_KERNEL);
if (!priv->steer)
return -ENOMEM;
for (i = 0; i < num_entries; i++)
for (j = 0; j < MLX4_NUM_STEERS; j++) {
INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
}
return 0;
}
static void mlx4_clear_steering(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_steer_index *entry, *tmp_entry;
struct mlx4_promisc_qp *pqp, *tmp_pqp;
int num_entries = dev->caps.num_ports;
int i, j;
for (i = 0; i < num_entries; i++) {
for (j = 0; j < MLX4_NUM_STEERS; j++) {
list_for_each_entry_safe(pqp, tmp_pqp,
&priv->steer[i].promisc_qps[j],
list) {
list_del(&pqp->list);
kfree(pqp);
}
list_for_each_entry_safe(entry, tmp_entry,
&priv->steer[i].steer_entries[j],
list) {
list_del(&entry->list);
list_for_each_entry_safe(pqp, tmp_pqp,
&entry->duplicates,
list) {
list_del(&pqp->list);
kfree(pqp);
}
kfree(entry);
}
}
}
kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
#define MLX4_OWNER_BASE 0x8069c
#define MLX4_OWNER_SIZE 4
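/* Read the device ownership semaphore from BAR 0: returns 0 if ownership was
 * obtained, 1 if another PF already holds the device, or a negative errno on
 * mapping or PCI-channel errors.
 */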
static int mlx4_get_ownership(struct mlx4_dev *dev)
{
void __iomem *owner;
u32 ret;
if (pci_channel_offline(dev->persist->pdev))
return -EIO;
owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
mlx4_err(dev, "Failed to obtain ownership bit\n");
return -ENOMEM;
}
ret = readl(owner);
iounmap(owner);
return (int) !!ret;
}
static void mlx4_free_ownership(struct mlx4_dev *dev)
{
void __iomem *owner;
if (pci_channel_offline(dev->persist->pdev))
return;
owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
mlx4_err(dev, "Failed to obtain ownership bit\n");
return;
}
writel(0, owner);
msleep(1000);
iounmap(owner);
}
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
!!((flags) & MLX4_FLAG_MASTER))
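/* Enable SR-IOV on the PF (or adopt an already-enabled VF configuration) and
 * allocate the per-VF bookkeeping array. Returns the updated device flags;
 * on failure the MASTER flag is cleared so the caller can continue without
 * SR-IOV.
 */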
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
u8 total_vfs, int existing_vfs, int reset_flow)
{
u64 dev_flags = dev->flags;
int err = 0;
int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
MLX4_MAX_NUM_VF);
if (reset_flow) {
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
GFP_KERNEL);
if (!dev->dev_vfs)
goto free_mem;
return dev_flags;
}
atomic_inc(&pf_loading);
if (dev->flags & MLX4_FLAG_SRIOV) {
if (existing_vfs != total_vfs) {
mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
existing_vfs, total_vfs);
total_vfs = existing_vfs;
}
}
dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
goto disable_sriov;
}
if (!(dev->flags & MLX4_FLAG_SRIOV)) {
if (total_vfs > fw_enabled_sriov_vfs) {
mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
total_vfs, fw_enabled_sriov_vfs);
err = -ENOMEM;
goto disable_sriov;
}
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
err = pci_enable_sriov(pdev, total_vfs);
}
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
goto disable_sriov;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev_flags |= MLX4_FLAG_SRIOV |
MLX4_FLAG_MASTER;
dev_flags &= ~MLX4_FLAG_SLAVE;
dev->persist->num_vfs = total_vfs;
}
return dev_flags;
disable_sriov:
atomic_dec(&pf_loading);
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
enum {
MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};
static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
int *nvfs)
{
int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
/* Checking for 64 VFs as a limitation of CX2 */
if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
requested_vfs >= 64) {
mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
requested_vfs);
return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
}
return 0;
}
static int mlx4_pci_enable_device(struct mlx4_dev *dev)
{
struct pci_dev *pdev = dev->persist->pdev;
int err = 0;
mutex_lock(&dev->persist->pci_status_mutex);
if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
err = pci_enable_device(pdev);
if (!err)
dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
}
mutex_unlock(&dev->persist->pci_status_mutex);
return err;
}
static void mlx4_pci_disable_device(struct mlx4_dev *dev)
{
struct pci_dev *pdev = dev->persist->pdev;
mutex_lock(&dev->persist->pci_status_mutex);
if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
pci_disable_device(pdev);
dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
}
mutex_unlock(&dev->persist->pci_status_mutex);
}
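/* Common bring-up path used by probe, restart, resume and error recovery:
 * reset the HCA (PF only), initialize the command interface, query firmware
 * capabilities, enable SR-IOV and MSI-X as needed, and set up all HCA
 * resources before registering the device. A function that turns out not to
 * be the primary PF re-enters this flow in slave mode via the slave_start
 * label.
 */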
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
int port;
int i;
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
devl_assert_locked(devlink);
dev = &priv->dev;
err = mlx4_adev_init(dev);
if (err)
return err;
ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh);
mutex_init(&priv->port_mutex);
mutex_init(&priv->bond_mutex);
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
dev->numa_node = dev_to_node(&pdev->dev);
/* Detect if this device is a virtual function */
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
dev->flags |= MLX4_FLAG_SLAVE;
} else {
/* We reset the device and enable SRIOV only for physical
* devices. Try to claim ownership on the device;
* if already taken, skip -- do not allow multiple PFs */
err = mlx4_get_ownership(dev);
if (err) {
if (err < 0)
goto err_adev;
else {
mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
err = -EINVAL;
goto err_adev;
}
}
atomic_set(&priv->opreq_count, 0);
INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
* the HCA in an undefined state.
*/
err = mlx4_reset(dev);
if (err) {
mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_sriov;
}
if (total_vfs) {
dev->flags = MLX4_FLAG_MASTER;
existing_vfs = pci_num_vf(pdev);
if (existing_vfs)
dev->flags |= MLX4_FLAG_SRIOV;
dev->persist->num_vfs = total_vfs;
}
}
/* On load, remove any previous indication of internal error;
 * the device is up.
 */
dev->persist->state = MLX4_DEVICE_STATE_UP;
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
mlx4_err(dev, "Failed to init command interface, aborting\n");
goto err_sriov;
}
/* In slave functions, the communication channel must be initialized
* before posting commands. Also, init num_slaves before calling
* mlx4_init_hca */
if (mlx4_is_mfunc(dev)) {
if (mlx4_is_master(dev)) {
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
} else {
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
goto err_cmd;
}
}
}
err = mlx4_init_fw(dev);
if (err) {
mlx4_err(dev, "Failed to init fw, aborting.\n");
goto err_mfunc;
}
if (mlx4_is_master(dev)) {
/* When we hit the goto slave_start below, dev_cap is already initialized */
if (!dev_cap) {
dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
if (!dev_cap) {
err = -ENOMEM;
goto err_fw;
}
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
goto err_fw;
}
if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
goto err_fw;
if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
u64 dev_flags = mlx4_enable_sriov(dev, pdev,
total_vfs,
existing_vfs,
reset_flow);
mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
goto err_sriov;
}
err = mlx4_reset(dev);
if (err) {
mlx4_err(dev, "Failed to reset HCA, aborting.\n");
goto err_sriov;
}
goto slave_start;
}
} else {
/* Legacy mode FW requires SRIOV to be enabled before
* doing QUERY_DEV_CAP, since max_eq's value is different if
* SRIOV is enabled.
*/
memset(dev_cap, 0, sizeof(*dev_cap));
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
goto err_fw;
}
if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
goto err_fw;
}
}
err = mlx4_init_hca(dev);
if (err) {
if (err == -EACCES) {
/* Not the primary physical function;
 * running in slave mode */
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
/* We're not a PF */
if (dev->flags & MLX4_FLAG_SRIOV) {
if (!existing_vfs)
pci_disable_sriov(pdev);
if (mlx4_is_master(dev) && !reset_flow)
atomic_dec(&pf_loading);
dev->flags &= ~MLX4_FLAG_SRIOV;
}
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
dev->flags |= MLX4_FLAG_SLAVE;
dev->flags &= ~MLX4_FLAG_MASTER;
goto slave_start;
} else
goto err_fw;
}
if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
existing_vfs, reset_flow);
if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
dev->flags = dev_flags;
err = mlx4_cmd_init(dev);
if (err) {
/* Only the VHCR was cleaned up, so we can still
 * send FW commands
 */
mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
goto err_close;
}
} else {
dev->flags = dev_flags;
}
if (!SRIOV_VALID_STATE(dev->flags)) {
mlx4_err(dev, "Invalid SRIOV state\n");
err = -EINVAL;
goto err_close;
}
}
/* Check if the device is functioning at its maximum possible speed.
 * There is no return code for this call; it just warns the user if the
 * PCI Express device's capabilities are under-satisfied by the bus.
 */
if (!mlx4_is_slave(dev))
pcie_print_link_status(dev->persist->pdev);
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
if (dev->caps.num_ports < 2 &&
num_vfs_argc > 1) {
err = -EINVAL;
mlx4_err(dev,
"Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
dev->caps.num_ports);
goto err_close;
}
memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));
for (i = 0;
i < sizeof(dev->persist->nvfs)/
sizeof(dev->persist->nvfs[0]); i++) {
unsigned j;
for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
dev->caps.num_ports;
}
}
/* In master functions, the communication channel
* must be initialized after obtaining its address from fw
*/
err = mlx4_multi_func_init(dev);
if (err) {
mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
goto err_close;
}
}
err = mlx4_alloc_eq_table(dev);
if (err)
goto err_master_mfunc;
bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
err = -EOPNOTSUPP;
mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_steering(dev);
if (err)
goto err_disable_msix;
}
mlx4_init_quotas(dev);
err = mlx4_setup_hca(dev);
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
if (err)
goto err_steer;
/* When the PF's resources are ready, arm its comm channel so it can
 * start receiving commands.
 */
if (mlx4_is_master(dev)) {
err = mlx4_ARM_COMM_CHANNEL(dev);
if (err) {
mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
err);
goto err_steer;
}
}
for (port = 1; port <= dev->caps.num_ports; port++) {
err = mlx4_init_port_info(dev, port);
if (err)
goto err_port;
}
priv->v2p.port1 = 1;
priv->v2p.port2 = 2;
err = mlx4_register_device(dev);
if (err)
goto err_port;
mlx4_sense_init(dev);
mlx4_start_sense(dev);
priv->removed = 0;
if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
atomic_dec(&pf_loading);
kfree(dev_cap);
return 0;
err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_default_counters(dev);
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
mlx4_cleanup_uar_table(dev);
err_steer:
if (!mlx4_is_slave(dev))
mlx4_clear_steering(dev);
err_disable_msix:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
err_free_eq:
mlx4_free_eq_table(dev);
err_master_mfunc:
if (mlx4_is_master(dev)) {
mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
mlx4_multi_func_cleanup(dev);
}
if (mlx4_is_slave(dev))
mlx4_slave_destroy_special_qp_cap(dev);
err_close:
mlx4_close_hca(dev);
err_fw:
mlx4_close_fw(dev);
err_mfunc:
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
err_cmd:
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
err_sriov:
if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
pci_disable_sriov(pdev);
dev->flags &= ~MLX4_FLAG_SRIOV;
}
if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
atomic_dec(&pf_loading);
kfree(priv->dev.dev_vfs);
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
kfree(dev_cap);
err_adev:
mlx4_adev_cleanup(dev);
return err;
}
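/* PCI-level probe helper: validate the num_vfs/probe_vf module parameters,
 * check the DCS and UAR BARs, set up the DMA mask, initialize crdump and
 * catastrophic-error handling, then call mlx4_load_one() for the actual HCA
 * bring-up.
 */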
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_priv *priv)
{
int err;
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
unsigned total_vfs = 0;
unsigned int i;
pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
err = mlx4_pci_enable_device(&priv->dev);
if (err) {
dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
/* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs
 * per port, we must limit the number of VFs to 63 (since there are
 * 128 MACs).
 */
for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
if (nvfs[i] < 0) {
dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
err = -EINVAL;
goto err_disable_pdev;
}
}
for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc;
i++) {
prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
err = -EINVAL;
goto err_disable_pdev;
}
}
if (total_vfs > MLX4_MAX_NUM_VF) {
dev_err(&pdev->dev,
"Requested more VF's (%d) than allowed by hw (%d)\n",
total_vfs, MLX4_MAX_NUM_VF);
err = -EINVAL;
goto err_disable_pdev;
}
for (i = 0; i < MLX4_MAX_PORTS; i++) {
if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
dev_err(&pdev->dev,
"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
nvfs[i] + nvfs[2], i + 1,
MLX4_MAX_NUM_VF_P_PORT);
err = -EINVAL;
goto err_disable_pdev;
}
}
/* Check for BARs. */
if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Missing UAR, aborting\n");
err = -ENODEV;
goto err_disable_pdev;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
goto err_disable_pdev;
}
pci_set_master(pdev);
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
/* Detect if this device is a virtual function */
if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
/* When acting as PF, we normally skip VFs unless explicitly
 * requested to probe them.
 */
if (total_vfs) {
unsigned vfs_offset = 0;
for (i = 0; i < ARRAY_SIZE(nvfs) &&
vfs_offset + nvfs[i] < extended_func_num(pdev);
vfs_offset += nvfs[i], i++)
;
if (i == ARRAY_SIZE(nvfs)) {
err = -ENODEV;
goto err_release_regions;
}
if ((extended_func_num(pdev) - vfs_offset)
> prb_vf[i]) {
dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
extended_func_num(pdev));
err = -ENODEV;
goto err_release_regions;
}
}
}
err = mlx4_crdump_init(&priv->dev);
if (err)
goto err_release_regions;
err = mlx4_catas_init(&priv->dev);
if (err)
goto err_crdump;
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
if (err)
goto err_catas;
return 0;
err_catas:
mlx4_catas_end(&priv->dev);
err_crdump:
mlx4_crdump_end(&priv->dev);
err_release_regions:
pci_release_regions(pdev);
err_disable_pdev:
mlx4_pci_disable_device(&priv->dev);
return err;
}
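/* Copy the devlink "driverinit" parameter values into the corresponding
 * module-level settings before a devlink reload re-runs mlx4_load_one(), and
 * notify devlink where the currently-running value differs from the saved
 * one.
 */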
static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
union devlink_param_value saved_value;
int err;
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
&saved_value);
if (!err && mlx4_internal_err_reset != saved_value.vbool) {
mlx4_internal_err_reset = saved_value.vbool;
/* Notify on value changed on runtime configuration mode */
devl_param_value_changed(devlink,
DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
}
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
&saved_value);
if (!err)
log_num_mac = order_base_2(saved_value.vu32);
err = devl_param_driverinit_value_get(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
&saved_value);
if (!err)
enable_64b_cqe_eqe = saved_value.vbool;
err = devl_param_driverinit_value_get(devlink,
MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
&saved_value);
if (!err)
enable_4k_uar = saved_value.vbool;
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
&saved_value);
if (!err && crdump->snapshot_enable != saved_value.vbool) {
crdump->snapshot_enable = saved_value.vbool;
devl_param_value_changed(devlink,
DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
}
}
static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
enum devlink_reload_action action,
enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
if (netns_change) {
NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
return -EOPNOTSUPP;
}
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
mlx4_restart_one_down(persist->pdev);
return 0;
}
static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
int err;
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
err = mlx4_restart_one_up(persist->pdev, true, devlink);
if (err)
mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
err);
return err;
}
static const struct devlink_ops mlx4_devlink_ops = {
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = mlx4_devlink_reload_down,
.reload_up = mlx4_devlink_reload_up,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct devlink *devlink;
struct mlx4_priv *priv;
struct mlx4_dev *dev;
int ret;
printk_once(KERN_INFO "%s", mlx4_version);
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
if (!dev->persist) {
ret = -ENOMEM;
goto err_devlink_free;
}
dev->persist->pdev = pdev;
dev->persist->dev = dev;
pci_set_drvdata(pdev, dev->persist);
priv->pci_dev_data = id->driver_data;
mutex_init(&dev->persist->device_state_mutex);
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
ret = devl_params_register(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
mlx4_devlink_set_params_init_values(devlink);
ret = __mlx4_init_one(pdev, id->driver_data, priv);
if (ret)
goto err_params_unregister;
pci_save_state(pdev);
devl_unlock(devlink);
devlink_register(devlink);
return 0;
err_params_unregister:
devl_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
static void mlx4_clean_dev(struct mlx4_dev *dev)
{
struct mlx4_dev_persistent *persist = dev->persist;
struct mlx4_priv *priv = mlx4_priv(dev);
unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);
memset(priv, 0, sizeof(*priv));
priv->dev.persist = persist;
priv->dev.flags = flags;
}
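/* Tear down everything mlx4_load_one() set up, in reverse order, while
 * preserving the persistent state (current port types, PCI device data)
 * needed to reload the same device later.
 */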
static void mlx4_unload_one(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
struct devlink *devlink;
int p, i;
devlink = priv_to_devlink(priv);
devl_assert_locked(devlink);
if (priv->removed)
return;
/* Save the current port types for later use */
for (i = 0; i < dev->caps.num_ports; i++) {
dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
dev->persist->curr_port_poss_type[i] = dev->caps.
possible_type[i + 1];
}
pci_dev_data = priv->pci_dev_data;
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
for (p = 1; p <= dev->caps.num_ports; p++) {
mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);
}
if (mlx4_is_master(dev))
mlx4_free_resource_tracker(dev,
RES_TR_FREE_SLAVES_ONLY);
mlx4_cleanup_default_counters(dev);
if (!mlx4_is_slave(dev))
mlx4_cleanup_counters_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
if (mlx4_is_master(dev))
mlx4_free_resource_tracker(dev,
RES_TR_FREE_STRUCTS_ONLY);
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
if (!mlx4_is_slave(dev))
mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
if (mlx4_is_master(dev))
mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
mlx4_close_fw(dev);
if (mlx4_is_slave(dev))
mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
if (!mlx4_is_slave(dev))
mlx4_free_ownership(dev);
mlx4_slave_destroy_special_qp_cap(dev);
kfree(dev->dev_vfs);
mlx4_adev_cleanup(dev);
mlx4_clean_dev(dev);
priv->pci_dev_data = pci_dev_data;
priv->removed = 1;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
devlink_unregister(devlink);
devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
mutex_lock(&persist->interface_state_mutex);
persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
mutex_unlock(&persist->interface_state_mutex);
/* Disabling SR-IOV is not allowed while there are active VFs */
if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
active_vfs = mlx4_how_many_lives_vf(dev);
if (active_vfs) {
pr_warn("Removing PF when there are active VF's !!\n");
pr_warn("Will not disable SR-IOV.\n");
}
}
/* The device is marked as under deletion; continue now without the lock,
 * letting other tasks terminate.
 */
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
else
mlx4_info(dev, "%s: interface is down\n", __func__);
mlx4_catas_end(dev);
mlx4_crdump_end(dev);
if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
mlx4_warn(dev, "Disabling SR-IOV\n");
pci_disable_sriov(pdev);
}
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
devl_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
devl_unlock(devlink);
devlink_free(devlink);
}
static int restore_current_port_types(struct mlx4_dev *dev,
enum mlx4_port_type *types,
enum mlx4_port_type *poss_types)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err, i;
mlx4_stop_sense(dev);
mutex_lock(&priv->port_mutex);
for (i = 0; i < dev->caps.num_ports; i++)
dev->caps.possible_type[i + 1] = poss_types[i];
err = mlx4_change_port_types(dev, types);
mlx4_start_sense(dev);
mutex_unlock(&priv->port_mutex);
return err;
}
static void mlx4_restart_one_down(struct pci_dev *pdev)
{
mlx4_unload_one(pdev);
}
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
int pci_dev_data, err, total_vfs;
pci_dev_data = priv->pci_dev_data;
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
if (reload)
mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
if (err) {
mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
__func__, pci_name(pdev), err);
return err;
}
err = restore_current_port_types(dev, dev->persist->curr_port_type,
dev->persist->curr_port_poss_type);
if (err)
mlx4_err(dev, "could not restore original port types (%d)\n",
err);
return err;
}
int mlx4_restart_one(struct pci_dev *pdev)
{
mlx4_restart_one_down(pdev);
return mlx4_restart_one_up(pdev, false, NULL);
}
#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
static const struct pci_device_id mlx4_pci_table[] = {
#ifdef CONFIG_MLX4_CORE_GEN2
/* MT25408 "Hermon" */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR), /* QDR */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2), /* DDR Gen2 */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2), /* QDR Gen2 */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN), /* EN 10GigE */
MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2), /* EN 10GigE Gen2 */
/* MT25458 ConnectX EN 10GBASE-T */
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2), /* Gen2 */
/* MT26468 ConnectX EN 10GigE PCIe Gen2*/
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
/* MT26478 ConnectX2 40GigE PCIe Gen2 */
MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
/* MT25400 Family [ConnectX-2] */
MLX_VF(0x1002), /* Virtual Function */
#endif /* CONFIG_MLX4_CORE_GEN2 */
/* MT27500 Family [ConnectX-3] */
MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
MLX_VF(0x1004), /* Virtual Function */
MLX_GN(0x1005), /* MT27510 Family */
MLX_GN(0x1006), /* MT27511 Family */
MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO), /* MT27520 Family */
MLX_GN(0x1008), /* MT27521 Family */
MLX_GN(0x1009), /* MT27530 Family */
MLX_GN(0x100a), /* MT27531 Family */
MLX_GN(0x100b), /* MT27540 Family */
MLX_GN(0x100c), /* MT27541 Family */
MLX_GN(0x100d), /* MT27550 Family */
MLX_GN(0x100e), /* MT27551 Family */
MLX_GN(0x100f), /* MT27560 Family */
MLX_GN(0x1010), /* MT27561 Family */
/*
* See the mellanox_check_broken_intx_masking() quirk when
* adding devices
*/
{ 0, }
};
MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
devlink = priv_to_devlink(mlx4_priv(dev));
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
mlx4_pci_disable_device(persist->dev);
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
int err;
mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
err = mlx4_pci_enable_device(dev);
if (err) {
mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_pci_resume(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
struct devlink *devlink;
int total_vfs;
int err;
mlx4_err(dev, "%s was called\n", __func__);
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
devlink = priv_to_devlink(priv);
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
priv, 1);
if (err) {
mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
__func__, err);
goto end;
}
err = restore_current_port_types(dev, dev->persist->
curr_port_type, dev->persist->
curr_port_poss_type);
if (err)
mlx4_err(dev, "could not restore original port types (%d)\n", err);
}
end:
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
devlink = priv_to_devlink(mlx4_priv(dev));
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
.error_detected = mlx4_pci_err_detected,
.slot_reset = mlx4_pci_slot_reset,
.resume = mlx4_pci_resume,
};
static int __maybe_unused mlx4_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
devlink = priv_to_devlink(mlx4_priv(dev));
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
return 0;
}
static int __maybe_unused mlx4_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
struct devlink *devlink;
int total_vfs;
int ret = 0;
mlx4_err(dev, "resume was called\n");
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
devlink = priv_to_devlink(priv);
devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
nvfs, priv, 1);
if (!ret) {
ret = restore_current_port_types(dev,
dev->persist->curr_port_type,
dev->persist->curr_port_poss_type);
if (ret)
mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
}
}
mutex_unlock(&persist->interface_state_mutex);
devl_unlock(devlink);
return ret;
}
static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);
static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
.shutdown = mlx4_shutdown,
.remove = mlx4_remove_one,
.driver.pm = &mlx4_pm_ops,
.err_handler = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
if (msi_x < 0) {
pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
return -1;
}
if ((log_num_mac < 0) || (log_num_mac > 7)) {
pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
}
if (log_num_vlan != 0)
pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
MLX4_LOG_NUM_VLANS);
if (use_prio != 0)
pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
log_mtts_per_seg);
return -1;
}
/* Check if module param for ports type has legal combination */
if (port_type_array[0] == false && port_type_array[1] == true) {
pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
port_type_array[0] = true;
}
if (mlx4_log_num_mgm_entry_size < -7 ||
(mlx4_log_num_mgm_entry_size > 0 &&
(mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
mlx4_log_num_mgm_entry_size,
MLX4_MIN_MGM_LOG_ENTRY_SIZE,
MLX4_MAX_MGM_LOG_ENTRY_SIZE);
return -1;
}
return 0;
}
static int __init mlx4_init(void)
{
int ret;
WARN_ONCE(strcmp(MLX4_ADEV_NAME, KBUILD_MODNAME),
"mlx4_core name not in sync with kernel module name");
if (mlx4_verify_params())
return -EINVAL;
mlx4_wq = create_singlethread_workqueue("mlx4");
if (!mlx4_wq)
return -ENOMEM;
ret = pci_register_driver(&mlx4_driver);
if (ret < 0)
destroy_workqueue(mlx4_wq);
return ret < 0 ? ret : 0;
}
static void __exit mlx4_cleanup(void)
{
pci_unregister_driver(&mlx4_driver);
destroy_workqueue(mlx4_wq);
}
module_init(mlx4_init);
module_exit(mlx4_cleanup);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/main.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>
#include "mlx4_en.h"
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn,
int user_prio, struct mlx4_qp_context *context)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
memset(context, 0, sizeof(*context));
context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (is_tx) {
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
} else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
}
context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
mdev->priv_uar.index));
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
/* force user priority per tx ring */
if (user_prio >= 0 && priv->prof->num_up == MLX4_EN_NUM_UP_HIGH) {
context->pri_path.sched_queue |= user_prio << 3;
context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
}
context->pri_path.counter_index = priv->counter_index;
context->cqn_send = cpu_to_be32(cqn);
context->cqn_recv = cpu_to_be32(cqn);
if (!rss &&
(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
context->pri_path.counter_index !=
MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
/* disable multicast loopback to qp with same counter */
if (!(dev->features & NETIF_F_LOOPBACK))
context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
}
context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
context->param3 |= cpu_to_be32(1 << 30);
if (!is_tx && !rss &&
(mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
}
}
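/* Toggle the multicast loopback source check on an existing QP via
 * UPDATE_QP; the check flag is set only when loopback is being disabled.
 */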
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
int loopback)
{
int ret;
struct mlx4_update_qp_params qp_params;
memset(&qp_params, 0, sizeof(qp_params));
if (!loopback)
qp_params.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB;
ret = mlx4_update_qp(priv->mdev->dev, qp->qpn,
MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
&qp_params);
return ret;
}
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_resources.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif
#include "mlx4_en.h"
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *frag,
gfp_t gfp)
{
struct page *page;
dma_addr_t dma;
page = alloc_page(gfp);
if (unlikely(!page))
return -ENOMEM;
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
if (unlikely(dma_mapping_error(priv->ddev, dma))) {
__free_page(page);
return -ENOMEM;
}
frag->page = page;
frag->dma = dma;
frag->page_offset = priv->rx_headroom;
return 0;
}
static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_desc *rx_desc,
struct mlx4_en_rx_alloc *frags,
gfp_t gfp)
{
int i;
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
if (mlx4_alloc_page(priv, frags, gfp))
return -ENOMEM;
ring->rx_alloc_pages++;
}
rx_desc->data[i].addr = cpu_to_be64(frags->dma +
frags->page_offset);
}
return 0;
}
static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *frag)
{
if (frag->page) {
dma_unmap_page(priv->ddev, frag->dma,
PAGE_SIZE, priv->dma_dir);
__free_page(frag->page);
}
/* We need to clear all fields, otherwise a change of priv->log_rx_info
* could lead to stale garbage being seen later in frag->page.
*/
memset(frag, 0, sizeof(*frag));
}
static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
}
/* If the number of used fragments does not fill up the ring stride,
* remaining (unused) fragments must be padded with null address/size
* and a special memory key */
possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
for (i = priv->num_frags; i < possible_frags; i++) {
rx_desc->data[i].byte_count = 0;
rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
rx_desc->data[i].addr = 0;
}
}
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index,
gfp_t gfp)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf +
(index << ring->log_stride);
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
if (likely(ring->page_cache.index > 0)) {
/* XDP uses a single page per frame */
if (!frags->page) {
ring->page_cache.index--;
frags->page = ring->page_cache.buf[ring->page_cache.index].page;
frags->dma = ring->page_cache.buf[ring->page_cache.index].dma;
}
frags->page_offset = XDP_PACKET_HEADROOM;
rx_desc->data[0].addr = cpu_to_be64(frags->dma +
XDP_PACKET_HEADROOM);
return 0;
}
return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}
static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
return ring->prod == ring->cons;
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
struct mlx4_en_rx_alloc *frags;
int nr;
frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
mlx4_en_free_frag(priv, frags + nr);
}
}
/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int ring_ind;
int buf_ind;
int new_size;
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size,
GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
ring->actual_size, new_size);
goto reduce_rings;
}
}
ring->actual_size++;
ring->prod++;
}
}
return 0;
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
}
}
return 0;
}
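/* Editor's note: stand-alone illustrative sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It shows the shrink-to-power-of-two
 * fallback used in mlx4_en_fill_rx_buffers() above: when only N buffers could
 * be posted, the rings are reduced to rounddown_pow_of_two(N) so that the
 * size_mask (actual_size - 1) stays a valid mask. All names and values below
 * are assumptions for illustration.
 */
#if 0
#include <stdio.h>

static unsigned int example_rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	/* e.g. only 300 of the requested 1024 descriptors could be filled */
	unsigned int posted = 300;
	unsigned int new_size = example_rounddown_pow_of_two(posted);

	printf("posted=%u -> ring size reduced to %u (mask 0x%x)\n",
	       posted, new_size, new_size - 1);
	return 0;
}
#endif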
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
int index;
en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
for (index = 0; index < ring->size; index++) {
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
}
ring->cons = 0;
ring->prod = 0;
}
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
int i;
int num_of_eqs;
int num_rx_rings;
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
num_of_eqs = max_t(int, MIN_RX_RINGS,
min_t(int,
mlx4_get_eqs_per_port(mdev->dev, i),
DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs, num_online_cpus());
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(num_rx_rings);
}
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node, int queue_index)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring;
int err = -ENOMEM;
int tmp;
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
en_err(priv, "Failed to allocate RX ring structure\n");
return -ENOMEM;
}
ring->prod = 0;
ring->cons = 0;
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
goto err_ring;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
if (!ring->rx_info) {
err = -ENOMEM;
goto err_xdp_info;
}
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
ring->buf = ring->wqres.buf.direct.buf;
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
*pring = ring;
return 0;
err_info:
kvfree(ring->rx_info);
ring->rx_info = NULL;
err_xdp_info:
xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
kfree(ring);
*pring = NULL;
return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int i;
int ring_ind;
int err;
int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE) {
/* Stamp first unused send wqe */
__be32 *ptr = (__be32 *)ring->buf;
__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
*ptr = stamp;
/* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
}
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = priv->rx_ring[ring_ind];
ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
}
return 0;
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
ring_ind--;
}
return err;
}
/* We recover from running out of memory by rescheduling our NAPI poll
* (mlx4_en_poll_rx_cq), whose RX processing (mlx4_en_process_rx_cq) tries
* to allocate all missing RX buffers via mlx4_en_refill_rx_buffers.
*/
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
int ring;
if (!priv->port_up)
return;
for (ring = 0; ring < priv->rx_ring_num; ring++) {
if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
local_bh_disable();
napi_reschedule(&priv->rx_cq[ring]->napi);
local_bh_enable();
}
}
}
/* When the rx ring is running in page-per-packet mode, a released frame can go
* directly into a small cache, to avoid unmapping or touching the page
* allocator. In bpf prog performance scenarios, buffers are either forwarded
* or dropped, never converted to skbs, so every page can come directly from
* this cache when it is sized to be a multiple of the napi budget.
*/
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
struct mlx4_en_rx_alloc *frame)
{
struct mlx4_en_page_cache *cache = &ring->page_cache;
if (cache->index >= MLX4_EN_CACHE_SIZE)
return false;
cache->buf[cache->index].page = frame->page;
cache->buf[cache->index].dma = frame->dma;
cache->index++;
return true;
}
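/* Editor's note: stand-alone illustrative sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It mirrors the fixed-size recycle
 * cache pattern used by mlx4_en_rx_recycle() and mlx4_en_prepare_rx_desc()
 * above: a push that fails once the array is full, and a pop that falls back
 * to a fresh allocation. The struct, size and malloc() stand-in below are
 * assumptions for illustration, not the driver's definitions.
 */
#if 0
#include <stdbool.h>
#include <stdlib.h>

#define EXAMPLE_CACHE_SIZE 8

struct example_page_cache {
	void *buf[EXAMPLE_CACHE_SIZE];
	unsigned int index;
};

static bool example_recycle(struct example_page_cache *c, void *frame)
{
	if (c->index >= EXAMPLE_CACHE_SIZE)
		return false;		/* cache full: caller frees the frame */
	c->buf[c->index++] = frame;
	return true;
}

static void *example_get(struct example_page_cache *c)
{
	if (c->index > 0)
		return c->buf[--c->index];	/* fast path: reuse */
	return malloc(4096);			/* slow path: new "page" */
}

int main(void)
{
	struct example_page_cache cache = { .index = 0 };
	void *page = malloc(4096);

	if (!example_recycle(&cache, page))
		free(page);		/* would be unmap + free in the driver */
	page = example_get(&cache);
	free(page);
	return 0;
}
#endif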
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
struct bpf_prog *old_prog;
old_prog = rcu_dereference_protected(
ring->xdp_prog,
lockdep_is_held(&mdev->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
xdp_rxq_info_unreg(&ring->xdp_rxq);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
kvfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
int i;
for (i = 0; i < ring->page_cache.index; i++) {
dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
PAGE_SIZE, priv->dma_dir);
put_page(ring->page_cache.buf[i].page);
}
ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
}
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
int length)
{
const struct mlx4_en_frag_info *frag_info = priv->frag_info;
unsigned int truesize = 0;
bool release = true;
int nr, frag_size;
struct page *page;
dma_addr_t dma;
/* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0;; frags++) {
frag_size = min_t(int, length, frag_info->frag_size);
page = frags->page;
if (unlikely(!page))
goto fail;
dma = frags->dma;
dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
frag_size, priv->dma_dir);
__skb_fill_page_desc(skb, nr, page, frags->page_offset,
frag_size);
truesize += frag_info->frag_stride;
if (frag_info->frag_stride == PAGE_SIZE / 2) {
frags->page_offset ^= PAGE_SIZE / 2;
release = page_count(page) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
} else if (!priv->rx_headroom) {
/* rx_headroom for non XDP setup is always 0.
* When XDP is set, the above condition will
* guarantee page is always released.
*/
u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
frags->page_offset += sz_align;
release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
}
if (release) {
dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
frags->page = NULL;
} else {
page_ref_inc(page);
}
nr++;
length -= frag_size;
if (!length)
break;
frag_info++;
}
skb->truesize += truesize;
return nr;
fail:
while (nr > 0) {
nr--;
__skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
}
return 0;
}
static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
const unsigned char *data = va + ETH_HLEN;
int i;
for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
if (data[i] != (unsigned char)i)
return;
}
/* Loopback found */
priv->loopback_ok = 1;
}
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
u32 missing = ring->actual_size - (ring->prod - ring->cons);
/* Try to batch allocations, but not too much. */
if (missing < 8)
return;
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
GFP_ATOMIC | __GFP_MEMALLOC))
break;
ring->prod++;
} while (likely(--missing));
mlx4_en_update_rx_prod_db(ring);
}
/* When hardware doesn't strip the vlan, we need to calculate the checksum
* over it and add it to the hardware's checksum calculation
*/
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
struct vlan_hdr *vlanh)
{
return csum_add(hw_checksum, *(__wsum *)vlanh);
}
/* Although the stack expects checksum which doesn't include the pseudo
* header, the HW adds it. To address that, we are subtracting the pseudo
* header checksum from the checksum value provided by the HW.
*/
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
struct iphdr *iph)
{
__u16 length_for_csum = 0;
__wsum csum_pseudo_header = 0;
__u8 ipproto = iph->protocol;
if (unlikely(ipproto == IPPROTO_SCTP))
return -1;
length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
length_for_csum, ipproto, 0);
skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
return 0;
}
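/* Editor's note: stand-alone numeric sketch, not part of the driver and not
 * compiled with it (guarded by #if 0). It illustrates the pseudo-header
 * fix-up performed by get_fixed_ipv4_csum() above (and the IPv6 variant
 * below): the HW reports a 16-bit one's-complement sum that includes the
 * pseudo header, so the driver subtracts the pseudo-header sum before
 * handing CHECKSUM_COMPLETE to the stack. The helper names and the numbers
 * in main() are made-up examples.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into 16-bit one's-complement form */
static uint16_t example_csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement subtraction: a - b == fold(a + ~b) */
static uint16_t example_csum_sub(uint16_t a, uint16_t b)
{
	return example_csum_fold((uint32_t)a + (uint16_t)~b);
}

int main(void)
{
	uint16_t hw_csum = 0x1234;	/* sum reported by HW (example value) */
	uint16_t pseudo  = 0x0fff;	/* pseudo-header contribution (example) */

	printf("csum without pseudo header: 0x%04x\n",
	       (unsigned)example_csum_sub(hw_csum, pseudo));
	return 0;
}
#endif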
#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header:
* 4 first bytes : priority, version, flow_lbl
* and 2 additional bytes : nexthdr, hop_limit.
*/
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
struct ipv6hdr *ipv6h)
{
__u8 nexthdr = ipv6h->nexthdr;
__wsum temp;
if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
nexthdr == IPPROTO_HOPOPTS ||
nexthdr == IPPROTO_SCTP))
return -1;
/* priority, version, flow_lbl */
temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
/* nexthdr and hop_limit */
skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
return 0;
}
#endif
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
/* We reach this function only after checking that at least one of
* the (IPv4 | IPv6) bits is set in cqe->status.
*/
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
void *hdr;
/* CQE csum doesn't cover padding octets in short ethernet
* frames. And the pad field is appended prior to calculating
* and appending the FCS field.
*
* Detecting these padded frames requires verifying and parsing
* IP headers, so we simply force all those small frames to skip
* checksum complete.
*/
if (short_frame(skb->len))
return -EINVAL;
hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
!(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
hdr += sizeof(struct vlan_hdr);
}
#if IS_ENABLED(CONFIG_IPV6)
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
#if IS_ENABLED(CONFIG_IPV6)
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
#else
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
struct mlx4_en_xdp_buff {
struct xdp_buff xdp;
struct mlx4_cqe *cqe;
struct mlx4_en_dev *mdev;
struct mlx4_en_rx_ring *ring;
struct net_device *dev;
};
int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
return -ENODATA;
*timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
mlx4_en_get_cqe_ts(_ctx->cqe));
return 0;
}
int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
struct mlx4_cqe *cqe = _ctx->cqe;
enum xdp_rss_hash_type xht = 0;
__be16 status;
if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
return -ENODATA;
*hash = be32_to_cpu(cqe->immed_rss_invalid);
status = cqe->status;
if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
xht = XDP_RSS_L4_TCP;
if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
xht = XDP_RSS_L4_UDP;
if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
xht |= XDP_RSS_L3_IPV4;
if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
xht |= XDP_RSS_L3_IPV6;
if (cqe->ipv6_ext_mask)
xht |= XDP_RSS_L3_DYNHDR;
}
*rss_type = xht;
return 0;
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_xdp_buff mxbuf = {};
int factor = priv->cqe_factor;
struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
int cq_ring = cq->ring;
bool doorbell_pending;
bool xdp_redir_flush;
struct mlx4_cqe *cqe;
int polled = 0;
int index;
if (unlikely(!priv->port_up || budget <= 0))
return 0;
ring = priv->rx_ring[cq_ring];
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
xdp_redir_flush = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
struct mlx4_en_rx_alloc *frags;
enum pkt_hash_types hash_type;
struct sk_buff *skb;
unsigned int length;
int ip_summed;
void *va;
int nr;
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
net_prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
dma_rmb();
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
goto next;
}
/* Check if we need to drop the packet: SRIOV is not enabled, we are
* not performing the selftest, and forced loopback (flb) is disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
const struct ethhdr *ethh = va;
dma_addr_t dma;
/* Get a pointer to the first fragment (we don't have an
* skb yet) and cast it to an ethhdr struct
*/
dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE);
if (is_multicast_ether_addr(ethh->h_dest)) {
struct mlx4_mac_entry *entry;
struct hlist_head *bucket;
unsigned int mac_hash;
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
hlist_for_each_entry_rcu_bh(entry, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source))
goto next;
}
}
}
if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, va);
goto next;
}
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
/* A bpf program gets first chance to drop the packet. It may
* read bytes but not past the end of the frag.
*/
if (xdp_prog) {
dma_addr_t dma;
void *orig_data;
u32 act;
dma = frags[0].dma + frags[0].page_offset;
dma_sync_single_for_cpu(priv->ddev, dma,
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
frags[0].page_offset, length, true);
orig_data = mxbuf.xdp.data;
mxbuf.cqe = cqe;
mxbuf.mdev = priv->mdev;
mxbuf.ring = ring;
mxbuf.dev = dev;
act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
length = mxbuf.xdp.data_end - mxbuf.xdp.data;
if (mxbuf.xdp.data != orig_data) {
frags[0].page_offset = mxbuf.xdp.data -
mxbuf.xdp.data_hard_start;
va = mxbuf.xdp.data;
}
switch (act) {
case XDP_PASS:
break;
case XDP_REDIRECT:
if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
ring->xdp_redirect++;
xdp_redir_flush = true;
frags[0].page = NULL;
goto next;
}
ring->xdp_redirect_fail++;
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
&doorbell_pending))) {
frags[0].page = NULL;
goto next;
}
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
goto next;
}
}
ring->bytes += length;
ring->packets++;
skb = napi_get_frags(&cq->napi);
if (unlikely(!skb))
goto next;
if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
u64 timestamp = mlx4_en_get_cqe_ts(cqe);
mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
timestamp);
}
skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
/* TODO: For IP non-TCP/UDP packets, when csum complete is
* not an option (not supported or for any other reason), we can
* actually check the cqe IPOK status bit and report
* CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
*/
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe->checksum == cpu_to_be16(0xffff)) {
bool l2_tunnel;
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
ip_summed = CHECKSUM_UNNECESSARY;
hash_type = PKT_HASH_TYPE_L4;
if (l2_tunnel)
skb->csum_level = 1;
ring->csum_ok++;
} else {
if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
goto csum_none;
if (check_csum(cqe, skb, va, dev->features))
goto csum_none;
ip_summed = CHECKSUM_COMPLETE;
hash_type = PKT_HASH_TYPE_L3;
ring->csum_complete++;
}
} else {
csum_none:
ip_summed = CHECKSUM_NONE;
hash_type = PKT_HASH_TYPE_L3;
ring->csum_none++;
}
skb->ip_summed = ip_summed;
if (dev->features & NETIF_F_RXHASH)
skb_set_hash(skb,
be32_to_cpu(cqe->immed_rss_invalid),
hash_type);
if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->sl_vid));
else if ((cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
(dev->features & NETIF_F_HW_VLAN_STAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
be16_to_cpu(cqe->sl_vid));
nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
if (likely(nr)) {
skb_shinfo(skb)->nr_frags = nr;
skb->len = length;
skb->data_len = length;
napi_gro_frags(&cq->napi);
} else {
__vlan_hwaccel_clear_tag(skb);
skb_clear_hash(skb);
}
next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
if (unlikely(++polled == budget))
break;
}
if (xdp_redir_flush)
xdp_do_flush();
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
}
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
}
mlx4_en_refill_rx_buffers(priv, ring);
return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (likely(priv->port_up))
napi_schedule_irqoff(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *xdp_tx_cq = NULL;
bool clean_complete = true;
int done;
if (!budget)
return 0;
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
budget) < budget;
xdp_tx_cq->xdp_busy = !clean_complete;
}
}
done = mlx4_en_process_rx_cq(dev, cq, budget);
/* If we used up all the quota - we're probably not done yet... */
if (done == budget || !clean_complete) {
int cpu_curr;
/* in case we got here because of !clean_complete */
done = budget;
cpu_curr = smp_processor_id();
if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
return budget;
/* The current CPU does not match the IRQ affinity mask -
* the affinity probably changed. Need to stop this NAPI
* poll and restart it on the right CPU.
* Try to avoid returning too small a value (like 0),
* so as not to fool net_rx_action() and its netdev_budget
*/
if (done)
done--;
}
/* Done for now */
if (likely(napi_complete_done(napi, done)))
mlx4_en_arm_cq(priv, cq);
return done;
}
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
int i = 0;
/* bpf requires buffers to be set up as 1 packet per page.
* This only works when num_frags == 1.
*/
if (priv->tx_ring_num[TX_XDP]) {
priv->frag_info[0].frag_size = eff_mtu;
/* This will gain efficient xdp frame recycling at the
* expense of more costly truesize accounting
*/
priv->frag_info[0].frag_stride = PAGE_SIZE;
priv->dma_dir = DMA_BIDIRECTIONAL;
priv->rx_headroom = XDP_PACKET_HEADROOM;
i = 1;
} else {
int frag_size_max = 2048, buf_size = 0;
/* should not happen, right? */
if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
frag_size_max = PAGE_SIZE;
while (buf_size < eff_mtu) {
int frag_stride, frag_size = eff_mtu - buf_size;
int pad, nb;
if (i < MLX4_EN_MAX_RX_FRAGS - 1)
frag_size = min(frag_size, frag_size_max);
priv->frag_info[i].frag_size = frag_size;
frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
/* We can only pack two 1536-byte frames on a 4K page.
* Therefore, each frame consumes more bytes (truesize).
*/
nb = PAGE_SIZE / frag_stride;
pad = (PAGE_SIZE - nb * frag_stride) / nb;
pad &= ~(SMP_CACHE_BYTES - 1);
priv->frag_info[i].frag_stride = frag_stride + pad;
buf_size += frag_size;
i++;
}
priv->dma_dir = DMA_FROM_DEVICE;
priv->rx_headroom = 0;
}
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_dbg(DRV,
priv,
" frag:%d - size:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_stride);
}
}
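/* Editor's note: stand-alone arithmetic sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It evaluates the non-XDP fragment
 * sizing done in mlx4_en_calc_rx_buf() above for one assumed case: 4K pages,
 * 64-byte cache lines and eff_mtu = 1536, showing why each frame ends up
 * with a 2048-byte truesize (only two such frames fit on a page).
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE	4096
#define EX_CACHE_BYTES	64
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int frag_size = 1536;					/* eff_mtu fits in one frag */
	int frag_stride = EX_ALIGN(frag_size, EX_CACHE_BYTES);	/* 1536 */
	int nb = EX_PAGE_SIZE / frag_stride;			/* 2 frames per page */
	int pad = (EX_PAGE_SIZE - nb * frag_stride) / nb;	/* 512 */

	pad &= ~(EX_CACHE_BYTES - 1);				/* still 512 */
	printf("frag_size=%d stride+pad=%d (truesize per frame)\n",
	       frag_size, frag_stride + pad);			/* 1536, 2048 */
	return 0;
}
#endif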
/* RSS related functions */
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_rx_ring *ring,
enum mlx4_qp_state *state,
struct mlx4_qp *qp)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_qp_context *context;
int err = 0;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
}
qp->event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, -1, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
/* Cancel FCS removal if FW allows */
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
context->param3 |= cpu_to_be32(1 << 29);
if (priv->dev->features & NETIF_F_RXFCS)
ring->fcs_del = 0;
else
ring->fcs_del = ETH_FCS_LEN;
} else
ring->fcs_del = 0;
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
mlx4_qp_remove(mdev->dev, qp);
mlx4_qp_free(mdev->dev, qp);
}
mlx4_en_update_rx_prod_db(ring);
out:
kfree(context);
return err;
}
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
{
int err;
u32 qpn;
err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
MLX4_RESERVE_A0_QP,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
return err;
}
return 0;
}
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
{
u32 qpn;
qpn = priv->drop_qp.qpn;
mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
struct mlx4_rss_context *rss_context;
int rss_rings;
void *ptr;
u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
u8 flags;
en_dbg(DRV, priv, "Configuring rss steering\n");
flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
&rss_map->base_qpn, flags,
MLX4_RES_USAGE_DRIVER);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
goto rss_err;
++good_qps;
}
if (priv->rx_ring_num == 1) {
rss_map->indir_qp = &rss_map->qps[0];
priv->base_qpn = rss_map->indir_qp->qpn;
en_info(priv, "Optimized Non-RSS steering\n");
return 0;
}
rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
if (!rss_map->indir_qp) {
err = -ENOMEM;
goto rss_err;
}
/* Configure RSS indirection qp */
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto qp_alloc_err;
}
rss_map->indir_qp->event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0]->cqn, -1, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
else
rss_rings = priv->prof->rss_rings;
ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
if (priv->mdev->profile.udp_rss) {
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
}
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
rss_context->hash_fn = MLX4_RSS_HASH_XOR;
} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
memcpy(rss_context->rss_key, priv->rss_key,
MLX4_EN_RSS_KEY_SIZE);
} else {
en_err(priv, "Unknown RSS hash function requested\n");
err = -EINVAL;
goto indir_err;
}
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
rss_map->indir_qp, &rss_map->indir_state);
if (err)
goto indir_err;
return 0;
indir_err:
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mlx4_qp_free(mdev->dev, rss_map->indir_qp);
qp_alloc_err:
kfree(rss_map->indir_qp);
rss_map->indir_qp = NULL;
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
return err;
}
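/* Editor's note: stand-alone illustrative sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It shows how the RSS context above
 * packs log2(rss_rings) and the base QPN into a single 32-bit field
 * (ilog2(rss_rings) << 24 | base_qpn); rss_rings must be a power of two.
 * The values in main() are assumptions for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static unsigned int example_ilog2(uint32_t v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint32_t rss_rings = 8;		/* power-of-two number of RX rings */
	uint32_t base_qpn = 0x48;	/* example base QP number */
	uint32_t field = (example_ilog2(rss_rings) << 24) | base_qpn;

	printf("rss base_qpn field: 0x%08x (log2(rings)=%u, qpn=0x%x)\n",
	       (unsigned)field, (unsigned)(field >> 24),
	       (unsigned)(field & 0xffffff));
	return 0;
}
#endif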
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int i;
if (priv->rx_ring_num > 1) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0,
rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mlx4_qp_free(mdev->dev, rss_map->indir_qp);
kfree(rss_map->indir_qp);
rss_map->indir_qp = NULL;
}
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/en_rx.c
|
/*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
#include "fw.h"
/*
* We allocate in as big chunks as we can, up to a maximum of 256 KB
* per chunk. Note that the chunks are not necessarily in contiguous
* physical memory.
*/
enum {
MLX4_ICM_ALLOC_SIZE = 1 << 18,
MLX4_TABLE_CHUNK_SIZE = 1 << 18,
};
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
int i;
if (chunk->nsg > 0)
dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
__free_pages(sg_page(&chunk->sg[i]),
get_order(chunk->sg[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
int i;
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev,
chunk->buf[i].size,
chunk->buf[i].addr,
chunk->buf[i].dma_addr);
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
struct mlx4_icm_chunk *chunk, *tmp;
if (!icm)
return;
list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
if (coherent)
mlx4_free_icm_coherent(dev, chunk);
else
mlx4_free_icm_pages(dev, chunk);
kfree(chunk);
}
kfree(icm);
}
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
gfp_t gfp_mask, int node)
{
struct page *page;
page = alloc_pages_node(node, gfp_mask, order);
if (!page) {
page = alloc_pages(gfp_mask, order);
if (!page)
return -ENOMEM;
}
sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
}
static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
int order, gfp_t gfp_mask)
{
buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
&buf->dma_addr, gfp_mask);
if (!buf->addr)
return -ENOMEM;
if (offset_in_page(buf->addr)) {
dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
buf->dma_addr);
return -ENOMEM;
}
buf->size = PAGE_SIZE << order;
return 0;
}
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
gfp_t gfp_mask, int coherent)
{
struct mlx4_icm *icm;
struct mlx4_icm_chunk *chunk = NULL;
int cur_order;
gfp_t mask;
int ret;
/* We use sg_set_buf for coherent allocs, which assumes low memory */
BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
icm = kmalloc_node(sizeof(*icm),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
dev->numa_node);
if (!icm) {
icm = kmalloc(sizeof(*icm),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!icm)
return NULL;
}
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
cur_order = get_order(MLX4_ICM_ALLOC_SIZE);
while (npages > 0) {
if (!chunk) {
chunk = kzalloc_node(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN),
dev->numa_node);
if (!chunk) {
chunk = kzalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN));
if (!chunk)
goto fail;
}
chunk->coherent = coherent;
if (!coherent)
sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
list_add_tail(&chunk->list, &icm->chunk_list);
}
while (1 << cur_order > npages)
--cur_order;
mask = gfp_mask;
if (cur_order)
mask &= ~__GFP_DIRECT_RECLAIM;
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
&chunk->buf[chunk->npages],
cur_order, mask);
else
ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
cur_order, mask,
dev->numa_node);
if (ret) {
if (--cur_order < 0)
goto fail;
else
continue;
}
++chunk->npages;
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
if (!chunk->nsg)
goto fail;
}
if (chunk->npages == MLX4_ICM_CHUNK_LEN)
chunk = NULL;
npages -= 1 << cur_order;
}
if (!coherent && chunk) {
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk->npages, DMA_BIDIRECTIONAL);
if (!chunk->nsg)
goto fail;
}
return icm;
fail:
mlx4_free_icm(dev, icm, coherent);
return NULL;
}
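/* Editor's note: stand-alone illustrative sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It mimics the allocation strategy
 * of mlx4_alloc_icm() above: try the largest chunk order first and, on
 * failure, retry at progressively smaller orders until the request is
 * satisfied or the order drops below zero. The fake allocator and the values
 * in main() are assumptions for illustration.
 */
#if 0
#include <stdio.h>

/* Pretend large orders fail, as happens when memory is fragmented */
static int example_try_alloc(int order, int max_ok_order)
{
	return order <= max_ok_order;	/* 1 = success, 0 = failure */
}

int main(void)
{
	int npages = 100;		/* pages still needed */
	int cur_order = 6;		/* start with 2^6 = 64 pages per chunk */
	int max_ok_order = 4;		/* allocator can only serve <= 2^4 */

	while (npages > 0) {
		while ((1 << cur_order) > npages)
			--cur_order;
		if (!example_try_alloc(cur_order, max_ok_order)) {
			if (--cur_order < 0)
				break;	/* give up; caller frees what it has */
			continue;
		}
		printf("allocated a 2^%d-page chunk\n", cur_order);
		npages -= 1 << cur_order;
	}
	return npages ? 1 : 0;
}
#endif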
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
u32 i = (obj & (table->num_obj - 1)) /
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
int ret = 0;
mutex_lock(&table->mutex);
if (table->icm[i]) {
++table->icm[i]->refcount;
goto out;
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
goto out;
}
if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
(u64) i * MLX4_TABLE_CHUNK_SIZE)) {
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
goto out;
}
++table->icm[i]->refcount;
out:
mutex_unlock(&table->mutex);
return ret;
}
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
u32 i;
u64 offset;
i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
mutex_lock(&table->mutex);
if (--table->icm[i]->refcount == 0) {
offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
mlx4_UNMAP_ICM(dev, table->virt + offset,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
mutex_unlock(&table->mutex);
}
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
dma_addr_t *dma_handle)
{
int offset, dma_offset, i;
u64 idx;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
void *addr = NULL;
if (!table->lowmem)
return NULL;
mutex_lock(&table->mutex);
idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
if (!icm)
goto out;
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
dma_addr_t dma_addr;
size_t len;
if (table->coherent) {
len = chunk->buf[i].size;
dma_addr = chunk->buf[i].dma_addr;
addr = chunk->buf[i].addr;
} else {
struct page *page;
len = sg_dma_len(&chunk->sg[i]);
dma_addr = sg_dma_address(&chunk->sg[i]);
/* XXX: we should never do this for highmem
* allocation. This function either needs
* to be split, or the kernel virtual address
* return needs to be made optional.
*/
page = sg_page(&chunk->sg[i]);
addr = lowmem_page_address(page);
}
if (dma_handle && dma_offset >= 0) {
if (len > dma_offset)
*dma_handle = dma_addr + dma_offset;
dma_offset -= len;
}
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
if (len > offset)
goto out;
offset -= len;
}
}
addr = NULL;
out:
mutex_unlock(&table->mutex);
return addr ? addr + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end)
{
int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
int err;
u32 i;
for (i = start; i <= end; i += inc) {
err = mlx4_table_get(dev, table, i);
if (err)
goto fail;
}
return 0;
fail:
while (i > start) {
i -= inc;
mlx4_table_put(dev, table, i);
}
return err;
}
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end)
{
u32 i;
for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
mlx4_table_put(dev, table, i);
}
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, u32 nobj, int reserved,
int use_lowmem, int use_coherent)
{
int obj_per_chunk;
int num_icm;
unsigned chunk_size;
int i;
u64 size;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
if (WARN_ON(!obj_per_chunk))
return -EINVAL;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
if (!table->icm)
return -ENOMEM;
table->virt = virt;
table->num_icm = num_icm;
table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
table->coherent = use_coherent;
mutex_init(&table->mutex);
size = (u64) nobj * obj_size;
for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
chunk_size = MLX4_TABLE_CHUNK_SIZE;
if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
chunk_size = PAGE_ALIGN(size -
i * MLX4_TABLE_CHUNK_SIZE);
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
mlx4_free_icm(dev, table->icm[i], use_coherent);
table->icm[i] = NULL;
goto err;
}
/*
* Add a reference to this ICM chunk so that it never
* gets freed (since it contains reserved firmware objects).
*/
++table->icm[i]->refcount;
}
return 0;
err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], use_coherent);
}
kvfree(table->icm);
return -ENOMEM;
}
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
int i;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
mlx4_free_icm(dev, table->icm[i], table->coherent);
}
kvfree(table->icm);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/icm.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "mlx4.h"
#include "fw.h"
enum {
MLX4_RES_QP,
MLX4_RES_RDMARC,
MLX4_RES_ALTC,
MLX4_RES_AUXC,
MLX4_RES_SRQ,
MLX4_RES_CQ,
MLX4_RES_EQ,
MLX4_RES_DMPT,
MLX4_RES_CMPT,
MLX4_RES_MTT,
MLX4_RES_MCG,
MLX4_RES_NUM
};
static const char *res_name[] = {
[MLX4_RES_QP] = "QP",
[MLX4_RES_RDMARC] = "RDMARC",
[MLX4_RES_ALTC] = "ALTC",
[MLX4_RES_AUXC] = "AUXC",
[MLX4_RES_SRQ] = "SRQ",
[MLX4_RES_CQ] = "CQ",
[MLX4_RES_EQ] = "EQ",
[MLX4_RES_DMPT] = "DMPT",
[MLX4_RES_CMPT] = "CMPT",
[MLX4_RES_MTT] = "MTT",
[MLX4_RES_MCG] = "MCG",
};
u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource {
u64 size;
u64 start;
int type;
u32 num;
int log_num;
};
u64 total_size = 0;
struct mlx4_resource *profile;
struct sysinfo si;
int i, j;
profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
if (!profile)
return -ENOMEM;
/*
* We want to scale the number of MTTs with the size of the
* system memory, since it makes sense to register a lot of
* memory on a system with a lot of memory. As a heuristic,
* make sure we have enough MTTs to cover twice the system
* memory (with PAGE_SIZE entries).
*
* This number has to be a power of two and fit into 32 bits
* due to device limitations, so cap this at 2^31 as well.
* That limits us to 8TB of memory registration per HCA with
* 4KB pages, which is probably OK for the next few months.
*/
si_meminfo(&si);
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
min(1UL << (31 - log_mtts_per_seg),
(si.totalram << 1) >> log_mtts_per_seg)));
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
profile[MLX4_RES_ALTC].size = dev_cap->altc_entry_sz;
profile[MLX4_RES_AUXC].size = dev_cap->aux_entry_sz;
profile[MLX4_RES_SRQ].size = dev_cap->srq_entry_sz;
profile[MLX4_RES_CQ].size = dev_cap->cqc_entry_sz;
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
profile[MLX4_RES_ALTC].num = request->num_qp;
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs :
min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {
profile[i].type = i;
profile[i].num = roundup_pow_of_two(profile[i].num);
profile[i].log_num = ilog2(profile[i].num);
profile[i].size *= profile[i].num;
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
}
/*
* Sort the resources in decreasing order of size. Since they
* all have sizes that are powers of 2, we'll be able to keep
* resources aligned to their size and pack them without gaps
* using the sorted order.
*/
for (i = MLX4_RES_NUM; i > 0; --i)
for (j = 1; j < i; ++j) {
if (profile[j].size > profile[j - 1].size)
swap(profile[j], profile[j - 1]);
}
for (i = 0; i < MLX4_RES_NUM; ++i) {
if (profile[i].size) {
profile[i].start = total_size;
total_size += profile[i].size;
}
if (total_size > dev_cap->max_icm_sz) {
mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
(unsigned long long) total_size,
(unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
i, res_name[profile[i].type],
profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
mlx4_dbg(dev, "HCA context memory: reserving %d KB\n",
(int) (total_size >> 10));
for (i = 0; i < MLX4_RES_NUM; ++i) {
switch (profile[i].type) {
case MLX4_RES_QP:
dev->caps.num_qps = profile[i].num;
init_hca->qpc_base = profile[i].start;
init_hca->log_num_qps = profile[i].log_num;
break;
case MLX4_RES_RDMARC:
for (priv->qp_table.rdmarc_shift = 0;
request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num;
++priv->qp_table.rdmarc_shift)
; /* nothing */
dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift;
priv->qp_table.rdmarc_base = (u32) profile[i].start;
init_hca->rdmarc_base = profile[i].start;
init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift;
break;
case MLX4_RES_ALTC:
init_hca->altc_base = profile[i].start;
break;
case MLX4_RES_AUXC:
init_hca->auxc_base = profile[i].start;
break;
case MLX4_RES_SRQ:
dev->caps.num_srqs = profile[i].num;
init_hca->srqc_base = profile[i].start;
init_hca->log_num_srqs = profile[i].log_num;
break;
case MLX4_RES_CQ:
dev->caps.num_cqs = profile[i].num;
init_hca->cqc_base = profile[i].start;
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
init_hca->log_num_eqs = 0x1f;
init_hca->eqc_base = profile[i].start;
init_hca->num_sys_eqs = dev_cap->num_sys_eqs;
} else {
dev->caps.num_eqs = roundup_pow_of_two(
min_t(unsigned,
dev_cap->max_eqs,
MAX_MSIX));
init_hca->eqc_base = profile[i].start;
init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
}
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
priv->mr_table.mpt_base = profile[i].start;
init_hca->dmpt_base = profile[i].start;
init_hca->log_mpt_sz = profile[i].log_num;
break;
case MLX4_RES_CMPT:
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz =
ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED) {
dev->caps.num_mgms = profile[i].num;
} else {
init_hca->log_mc_hash_sz =
profile[i].log_num - 1;
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
}
break;
default:
break;
}
}
/*
* PDs don't take any HCA memory, but we assign them as part
* of the HCA profile anyway.
*/
dev->caps.num_pds = MLX4_NUM_PDS;
kfree(profile);
return total_size;
}
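/* Editor's note: stand-alone arithmetic sketch, not part of the driver and
 * not compiled with it (guarded by #if 0). It evaluates the MTT sizing
 * heuristic used in mlx4_make_profile() above: take the larger of the
 * requested MTT segment count and enough segments to cover twice system
 * memory, cap it so the total entry count stays within 2^31, and round up
 * to a power of two. The assumed inputs (16 GB of RAM, 4K pages,
 * log_mtts_per_seg = 3) are for illustration only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t example_roundup_pow2(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t totalram_pages = (16ULL << 30) / 4096;	/* 16 GB in 4K pages */
	unsigned int log_mtts_per_seg = 3;
	uint64_t requested = 1 << 20;			/* requested num_mtt */
	uint64_t from_ram = (totalram_pages << 1) >> log_mtts_per_seg;
	uint64_t cap = 1ULL << (31 - log_mtts_per_seg);
	uint64_t scaled = from_ram < cap ? from_ram : cap;
	uint64_t num_mtt =
		example_roundup_pow2(requested > scaled ? requested : scaled);

	printf("num_mtt = %llu\n", (unsigned long long)num_mtt);
	return 0;
}
#endif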
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/profile.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/export.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "mlx4_stats.h"
#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1)
#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5)
#define MLX4_FLAG2_V_USER_MAC_MASK BIT(6)
#define MLX4_FLAG_V_MTU_MASK BIT(0)
#define MLX4_FLAG_V_PPRX_MASK BIT(1)
#define MLX4_FLAG_V_PPTX_MASK BIT(2)
#define MLX4_IGNORE_FCS_MASK 0x1
#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
mutex_init(&table->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
table->is_dup[i] = false;
}
table->max = 1 << dev->caps.log_num_macs;
table->total = 0;
}
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
int i;
mutex_init(&table->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
table->entries[i] = 0;
table->refs[i] = 0;
table->is_dup[i] = false;
}
table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
table->total = 0;
}
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
struct mlx4_roce_gid_table *table)
{
int i;
mutex_init(&table->mutex);
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
}
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
int err = 0;
if (index < 0 || index >= table->max || !table->entries[index]) {
mlx4_warn(dev, "No valid Mac entry for the given index\n");
err = -EINVAL;
}
return err;
}
static int find_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, u64 mac)
{
int i;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (table->refs[i] &&
(MLX4_MAC_MASK & mac) ==
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
return i;
}
/* Mac not found */
return -EINVAL;
}
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
struct mlx4_cmd_mailbox *mailbox;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
int i;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (!table->refs[i])
continue;
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
*idx = i;
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
{
int i, num_eth_ports = 0;
if (!mlx4_is_mfunc(dev))
return false;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
++num_eth_ports;
return (num_eth_ports == 2) ? true : false;
}
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
int i, err = 0;
int free = -1;
int free_for_dup = -1;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
bool need_mf_bond = mlx4_need_mf_bond(dev);
bool can_mf_bond = true;
mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
(unsigned long long)mac, port,
dup ? "with" : "without");
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
}
if (need_mf_bond) {
int index_at_port = -1;
int index_at_dup_port = -1;
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
index_at_port = i;
if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
index_at_dup_port = i;
}
/* check that same mac is not in the tables at different indices */
if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the mac is already in the primary table, the slot must be
* available in the duplicate table as well.
*/
if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
}
/* If the mac is already in the duplicate table, check that the
* corresponding index is not occupied in the primary table, or
* the primary table already contains the mac at the same index.
* Otherwise, you cannot bond (primary contains a different mac
* at that index).
*/
if (index_at_dup_port >= 0) {
if (!table->refs[index_at_dup_port] ||
((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port;
else
can_mf_bond = false;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (!table->refs[i]) {
if (free < 0)
free = i;
if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
if (!dup_table->refs[i])
free_for_dup = i;
}
continue;
}
if ((MLX4_MAC_MASK & mac) ==
(MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
/* MAC already registered, increment ref count */
err = i;
++table->refs[i];
if (dup) {
u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
if (dup_mac != mac || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
mac, dup_port, i);
}
}
goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) {
if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
/* No free mac entries */
err = -ENOSPC;
goto out;
}
/* Register new MAC */
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
(unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
table->refs[free] = 1;
table->is_dup[free] = false;
++table->total;
if (dup) {
dup_table->refs[free] = 0;
dup_table->is_dup[free] = true;
dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
dup_table->is_dup[free] = false;
dup_table->entries[free] = 0;
goto out;
}
++dup_table->total;
}
err = free;
out:
if (need_mf_bond) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
int err = -EINVAL;
if (mlx4_is_mfunc(dev)) {
if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
err = mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
if (err && err == -EINVAL && mlx4_is_slave(dev)) {
/* retry using old REG_MAC format */
set_param_l(&out_param, port);
err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
dev->flags |= MLX4_FLAG_OLD_REG_MAC;
}
if (err)
return err;
return get_param_l(&out_param);
}
return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
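/* Usage sketch (hypothetical caller, not part of this file): a unicast MAC
 * is registered once, the returned table index is combined with the port's
 * base QP number, and the reference is dropped again on teardown.
 *
 *	int idx = mlx4_register_mac(dev, port, mac);
 *	if (idx < 0)
 *		return idx;
 *	qpn = mlx4_get_base_qpn(dev, port) + idx;
 *	...
 *	mlx4_unregister_mac(dev, port, mac);
 */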
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info;
struct mlx4_mac_table *table;
int index;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
if (port < 1 || port > dev->caps.num_ports) {
mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
return;
}
info = &mlx4_priv(dev)->port[port];
table = &info->mac_table;
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
}
index = find_index(dev, table, mac);
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
index);
if (!table->refs[index])
dup_table->is_dup[index] = false;
goto out;
}
table->entries[index] = 0;
if (mlx4_set_port_mac_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
--table->total;
if (dup) {
dup_table->is_dup[index] = false;
if (dup_table->refs[index])
goto out;
dup_table->entries[index] = 0;
if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
		--dup_table->total;
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
(void) mlx4_cmd_imm(dev, mac, &out_param,
((u32) port) << 8 | (u32) RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
} else {
/* use old unregister mac format */
set_param_l(&out_param, port);
(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
return;
}
__mlx4_unregister_mac(dev, port, mac);
return;
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
int index = qpn - info->base_qpn;
int err = 0;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
/* CX1 doesn't support multi-functions */
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
}
err = validate_index(dev, table, index);
if (err)
goto out;
table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
(unsigned long long) new_mac);
table->entries[index] = 0;
} else {
if (dup) {
dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
(unsigned long long)new_mac);
dup_table->entries[index] = 0;
}
}
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
struct mlx4_cmd_mailbox *mailbox;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i;
for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
if (table->refs[i] &&
(vid == (MLX4_VLAN_MASK &
be32_to_cpu(table->entries[i])))) {
/* VLAN already registered, increase reference count */
*idx = i;
return 0;
}
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
int free = -1;
int free_for_dup = -1;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
bool need_mf_bond = mlx4_need_mf_bond(dev);
bool can_mf_bond = true;
mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
vlan, port,
dup ? "with" : "without");
if (need_mf_bond) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
}
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
goto out;
}
if (need_mf_bond) {
int index_at_port = -1;
int index_at_dup_port = -1;
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
index_at_port = i;
if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
index_at_dup_port = i;
}
/* check that same vlan is not in the tables at different indices */
if ((index_at_port != index_at_dup_port) &&
(index_at_port >= 0) &&
(index_at_dup_port >= 0))
can_mf_bond = false;
/* If the vlan is already in the primary table, the slot must be
* available in the duplicate table as well.
*/
if (index_at_port >= 0 && index_at_dup_port < 0 &&
dup_table->refs[index_at_port]) {
can_mf_bond = false;
}
/* If the vlan is already in the duplicate table, check that the
* corresponding index is not occupied in the primary table, or
* the primary table already contains the vlan at the same index.
* Otherwise, you cannot bond (primary contains a different vlan
* at that index).
*/
if (index_at_dup_port >= 0) {
if (!table->refs[index_at_dup_port] ||
(vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
free_for_dup = index_at_dup_port;
else
can_mf_bond = false;
}
}
for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
if (!table->refs[i]) {
if (free < 0)
free = i;
if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
if (!dup_table->refs[i])
free_for_dup = i;
}
}
if ((table->refs[i] || table->is_dup[i]) &&
(vlan == (MLX4_VLAN_MASK &
be32_to_cpu(table->entries[i])))) {
/* Vlan already registered, increase references count */
mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
*index = i;
++table->refs[i];
if (dup) {
u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
if (dup_vlan != vlan || !dup_table->is_dup[i]) {
mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
vlan, dup_port, i);
}
}
goto out;
}
}
if (need_mf_bond && (free_for_dup < 0)) {
if (dup) {
mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
dup = false;
}
can_mf_bond = false;
}
if (need_mf_bond && can_mf_bond)
free = free_for_dup;
if (free < 0) {
err = -ENOMEM;
goto out;
}
/* Register new VLAN */
table->refs[free] = 1;
table->is_dup[free] = false;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
err = mlx4_set_port_vlan_table(dev, port, table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
table->refs[free] = 0;
table->entries[free] = 0;
goto out;
}
++table->total;
if (dup) {
dup_table->refs[free] = 0;
dup_table->is_dup[free] = true;
dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
if (unlikely(err)) {
mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
dup_table->is_dup[free] = false;
dup_table->entries[free] = 0;
goto out;
}
++dup_table->total;
}
*index = free;
out:
if (need_mf_bond) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
return err;
}
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
u64 out_param = 0;
int err;
if (vlan > 4095)
return -EINVAL;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, vlan, &out_param,
((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*index = get_param_l(&out_param);
return err;
}
return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
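/* Usage sketch (hypothetical caller): the VLAN table index is returned
 * through @index rather than the return value, and the first
 * MLX4_VLAN_REGULAR indices are reserved for special entries.
 *
 *	int vidx;
 *	int err = mlx4_register_vlan(dev, port, vid, &vidx);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_unregister_vlan(dev, port, vid);
 */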
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int index;
bool dup = mlx4_is_mf_bonded(dev);
u8 dup_port = (port == 1) ? 2 : 1;
struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
if (dup) {
if (port == 1) {
mutex_lock(&table->mutex);
mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&dup_table->mutex);
mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
}
} else {
mutex_lock(&table->mutex);
}
if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
goto out;
}
if (index < MLX4_VLAN_REGULAR) {
mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
goto out;
}
if (--table->refs[index] || table->is_dup[index]) {
mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
table->refs[index], index);
if (!table->refs[index])
dup_table->is_dup[index] = false;
goto out;
}
table->entries[index] = 0;
if (mlx4_set_port_vlan_table(dev, port, table->entries))
mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
--table->total;
if (dup) {
dup_table->is_dup[index] = false;
if (dup_table->refs[index])
goto out;
dup_table->entries[index] = 0;
if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
--dup_table->total;
}
out:
if (dup) {
if (port == 2) {
mutex_unlock(&table->mutex);
mutex_unlock(&dup_table->mutex);
} else {
mutex_unlock(&dup_table->mutex);
mutex_unlock(&table->mutex);
}
} else {
mutex_unlock(&table->mutex);
}
}
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
u64 out_param = 0;
if (mlx4_is_mfunc(dev)) {
(void) mlx4_cmd_imm(dev, vlan, &out_param,
((u32) port) << 8 | (u32) RES_VLAN,
RES_OP_RESERVE_AND_MAP,
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
return;
}
__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_bond_mac_table(struct mlx4_dev *dev)
{
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
int ret = 0;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (t1->entries[i] && !t2->entries[i]) {
t2->entries[i] = t1->entries[i];
t2->is_dup[i] = true;
update2 = true;
} else if (!t1->entries[i] && t2->entries[i]) {
t1->entries[i] = t2->entries[i];
t1->is_dup[i] = true;
update1 = true;
} else if (t1->entries[i] && t2->entries[i]) {
t1->is_dup[i] = true;
t2->is_dup[i] = true;
}
}
if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
}
if (!ret && update2) {
ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
if (ret)
mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
int ret = 0;
int ret1;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (t1->entries[i] != t2->entries[i]) {
mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
if (!t1->entries[i])
continue;
t1->is_dup[i] = false;
if (!t1->refs[i]) {
t1->entries[i] = 0;
update1 = true;
}
t2->is_dup[i] = false;
if (!t2->refs[i]) {
t2->entries[i] = 0;
update2 = true;
}
}
if (update1) {
ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
}
if (update2) {
ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
if (ret1) {
mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
ret = ret1;
}
}
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{
struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if ((t1->entries[i] != t2->entries[i]) &&
t1->entries[i] && t2->entries[i]) {
mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] && !t2->entries[i]) {
t2->entries[i] = t1->entries[i];
t2->is_dup[i] = true;
update2 = true;
} else if (!t1->entries[i] && t2->entries[i]) {
t1->entries[i] = t2->entries[i];
t1->is_dup[i] = true;
update1 = true;
} else if (t1->entries[i] && t2->entries[i]) {
t1->is_dup[i] = true;
t2->is_dup[i] = true;
}
}
if (update1) {
ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
}
if (!ret && update2) {
ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret)
mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
}
if (ret)
mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
int ret = 0;
int ret1;
int i;
bool update1 = false;
bool update2 = false;
mutex_lock(&t1->mutex);
mutex_lock(&t2->mutex);
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (t1->entries[i] != t2->entries[i]) {
mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
ret = -EINVAL;
goto unlock;
}
}
for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
if (!t1->entries[i])
continue;
t1->is_dup[i] = false;
if (!t1->refs[i]) {
t1->entries[i] = 0;
update1 = true;
}
t2->is_dup[i] = false;
if (!t2->refs[i]) {
t2->entries[i] = 0;
update2 = true;
}
}
if (update1) {
ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
if (ret)
mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
}
if (update2) {
ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
if (ret1) {
mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
ret = ret1;
}
}
unlock:
mutex_unlock(&t2->mutex);
mutex_unlock(&t1->mutex);
return ret;
}
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
u8 *inbuf, *outbuf;
int err;
inmailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inmailbox))
return PTR_ERR(inmailbox);
outmailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outmailbox)) {
mlx4_free_cmd_mailbox(dev, inmailbox);
return PTR_ERR(outmailbox);
}
inbuf = inmailbox->buf;
outbuf = outmailbox->buf;
inbuf[0] = 1;
inbuf[1] = 1;
inbuf[2] = 1;
inbuf[3] = 1;
*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
mlx4_free_cmd_mailbox(dev, outmailbox);
return err;
}
static struct mlx4_roce_gid_entry zgid_entry;
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
int vfs;
int slave_gid = slave;
unsigned i;
struct mlx4_slaves_pport slaves_pport;
struct mlx4_active_ports actv_ports;
unsigned max_port_p_one;
if (slave == 0)
return MLX4_ROCE_PF_GIDS;
/* Slave is a VF */
slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
actv_ports = mlx4_get_active_ports(dev, slave);
max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
for (i = 1; i < max_port_p_one; i++) {
struct mlx4_active_ports exclusive_ports;
struct mlx4_slaves_pport slaves_pport_actv;
bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
set_bit(i - 1, exclusive_ports.ports);
if (i == port)
continue;
slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
dev->persist->num_vfs + 1);
}
vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
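/* Distribution example for the function above: with
 * G = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS GIDs left for N VFs on the
 * port, the first G % N VFs get G / N + 1 GIDs each and the remaining VFs
 * get G / N, so all G entries are handed out.
 */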
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
int gids;
unsigned i;
int slave_gid = slave;
int vfs;
struct mlx4_slaves_pport slaves_pport;
struct mlx4_active_ports actv_ports;
unsigned max_port_p_one;
if (slave == 0)
return 0;
slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
actv_ports = mlx4_get_active_ports(dev, slave);
max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
for (i = 1; i < max_port_p_one; i++) {
struct mlx4_active_ports exclusive_ports;
struct mlx4_slaves_pport slaves_pport_actv;
bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
set_bit(i - 1, exclusive_ports.ports);
if (i == port)
continue;
slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
dev->persist->num_vfs + 1);
}
gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
if (slave_gid <= gids % vfs)
return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
return MLX4_ROCE_PF_GIDS + (gids % vfs) +
((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
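/* Base-index example matching the split above: the PF owns indices
 * 0 .. MLX4_ROCE_PF_GIDS - 1; VF k (1-based, per port) then starts at
 * MLX4_ROCE_PF_GIDS + (k - 1) * (G / N + 1) while k <= G % N, and at
 * MLX4_ROCE_PF_GIDS + G % N + (k - 1) * (G / N) afterwards.
 */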
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
int port, struct mlx4_cmd_mailbox *mailbox)
{
struct mlx4_roce_gid_entry *gid_entry_mbox;
struct mlx4_priv *priv = mlx4_priv(dev);
int num_gids, base, offset;
int i, err;
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
base = mlx4_get_base_gid_ix(dev, slave, port);
memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
mutex_lock(&(priv->port[port].gid_table.mutex));
/* Zero-out gids belonging to that slave in the port GID table */
for (i = 0, offset = base; i < num_gids; offset++, i++)
memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to mailbox for passing to FW */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
memcpy(gid_entry_mbox->raw,
priv->port[port].gid_table.roce_gids[i].raw,
MLX4_ROCE_GID_ENTRY_SIZE);
err = mlx4_cmd(dev, mailbox->dma,
((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return err;
}
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
{
struct mlx4_active_ports actv_ports;
struct mlx4_cmd_mailbox *mailbox;
int num_eth_ports, err;
int i;
if (slave < 0 || slave > dev->persist->num_vfs)
return;
actv_ports = mlx4_get_active_ports(dev, slave);
for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
if (test_bit(i, actv_ports.ports)) {
if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
continue;
num_eth_ports++;
}
}
if (!num_eth_ports)
return;
/* have ETH ports. Alloc mailbox for SET_PORT command */
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return;
for (i = 0; i < dev->caps.num_ports; i++) {
if (test_bit(i, actv_ports.ports)) {
if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
continue;
err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
if (err)
mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
slave, i + 1, err);
}
}
mlx4_free_cmd_mailbox(dev, mailbox);
return;
}
static void
mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
struct mlx4_set_port_general_context *gen_context)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
struct mlx4_slave_state *slave_st = &master->slave_state[slave];
u16 mtu, prev_mtu;
	/* MTU is configured as the max MTU among all
	 * the functions on the port.
	 */
mtu = be16_to_cpu(gen_context->mtu);
mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
prev_mtu = slave_st->mtu[port];
slave_st->mtu[port] = mtu;
if (mtu > master->max_mtu[port])
master->max_mtu[port] = mtu;
if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
int i;
slave_st->mtu[port] = mtu;
master->max_mtu[port] = mtu;
for (i = 0; i < dev->num_slaves; i++)
master->max_mtu[port] =
max_t(u16, master->max_mtu[port],
master->slave_state[i].mtu[port]);
}
gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
}
static void
mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
struct mlx4_set_port_general_context *gen_context)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
struct mlx4_slave_state *slave_st = &master->slave_state[slave];
u16 user_mtu, prev_user_mtu;
/* User Mtu is configured as the max USER_MTU among all
* the functions on the port.
*/
user_mtu = be16_to_cpu(gen_context->user_mtu);
user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
prev_user_mtu = slave_st->user_mtu[port];
slave_st->user_mtu[port] = user_mtu;
if (user_mtu > master->max_user_mtu[port])
master->max_user_mtu[port] = user_mtu;
if (user_mtu < prev_user_mtu &&
prev_user_mtu == master->max_user_mtu[port]) {
int i;
slave_st->user_mtu[port] = user_mtu;
master->max_user_mtu[port] = user_mtu;
for (i = 0; i < dev->num_slaves; i++)
master->max_user_mtu[port] =
max_t(u16, master->max_user_mtu[port],
master->slave_state[i].user_mtu[port]);
}
gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
}
static void
mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
struct mlx4_set_port_general_context *gen_context)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
/* Slave cannot change Global Pause configuration */
if (slave != mlx4_master_func_num(dev) &&
(gen_context->pptx != master->pptx ||
gen_context->pprx != master->pprx)) {
gen_context->pptx = master->pptx;
gen_context->pprx = master->pprx;
mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
slave);
} else {
master->pptx = gen_context->pptx;
master->pprx = gen_context->pprx;
}
}
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_port_info *port_info;
struct mlx4_set_port_rqp_calc_context *qpn_context;
struct mlx4_set_port_general_context *gen_context;
struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
int reset_qkey_viols;
int port;
int is_eth;
int num_gids;
int base;
u32 in_modifier;
u32 promisc;
int err;
int i, j;
int offset;
__be32 agg_cap_mask;
__be32 slave_cap_mask;
__be32 new_cap_mask;
port = in_mod & 0xff;
in_modifier = in_mod >> 8;
is_eth = op_mod;
port_info = &priv->port[port];
	/* Slaves cannot perform SET_PORT operations except for the
	 * GENERAL (MTU/USER_MTU) and GID_TABLE in_modifiers.
	 */
if (is_eth) {
if (slave != dev->caps.function &&
in_modifier != MLX4_SET_PORT_GENERAL &&
in_modifier != MLX4_SET_PORT_GID_TABLE) {
mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
slave);
return -EINVAL;
}
switch (in_modifier) {
case MLX4_SET_PORT_RQP_CALC:
qpn_context = inbox->buf;
qpn_context->base_qpn =
cpu_to_be32(port_info->base_qpn);
qpn_context->n_mac = 0x7;
promisc = be32_to_cpu(qpn_context->promisc) >>
SET_PORT_PROMISC_SHIFT;
qpn_context->promisc = cpu_to_be32(
promisc << SET_PORT_PROMISC_SHIFT |
port_info->base_qpn);
promisc = be32_to_cpu(qpn_context->mcast) >>
SET_PORT_MC_PROMISC_SHIFT;
qpn_context->mcast = cpu_to_be32(
promisc << SET_PORT_MC_PROMISC_SHIFT |
port_info->base_qpn);
break;
case MLX4_SET_PORT_GENERAL:
gen_context = inbox->buf;
if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
mlx4_en_set_port_mtu(dev, slave, port,
gen_context);
if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
mlx4_en_set_port_user_mtu(dev, slave, port,
gen_context);
if (gen_context->flags &
(MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
mlx4_en_set_port_global_pause(dev, slave,
gen_context);
break;
case MLX4_SET_PORT_GID_TABLE:
			/* A guest may own multiple GID entries, so loop over
			 * the number of GIDs assigned to it.
			 * 1. Check that there are no duplicates in the GIDs
			 *    passed by the slave.
			 */
num_gids = mlx4_get_slave_num_gids(dev, slave, port);
base = mlx4_get_base_gid_ix(dev, slave, port);
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
sizeof(zgid_entry)))
continue;
gid_entry_mb1 = gid_entry_mbox + 1;
for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
if (!memcmp(gid_entry_mb1->raw,
zgid_entry.raw, sizeof(zgid_entry)))
continue;
if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
sizeof(gid_entry_mbox->raw))) {
/* found duplicate */
return -EINVAL;
}
}
}
/* 2. Check that do not have duplicates in OTHER
* entries in the port GID table
*/
mutex_lock(&(priv->port[port].gid_table.mutex));
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
if (i >= base && i < base + num_gids)
continue; /* don't compare to slave's current gids */
gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
continue;
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
sizeof(zgid_entry)))
continue;
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) {
/* found duplicate */
mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return -EINVAL;
}
}
}
/* insert slave GIDs with memcpy, starting at slave's base index */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to current mailbox for passing to FW */
gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
memcpy(gid_entry_mbox->raw,
priv->port[port].gid_table.roce_gids[i].raw,
MLX4_ROCE_GID_ENTRY_SIZE);
err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return err;
}
return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
}
/* Slaves are not allowed to SET_PORT beacon (LED) blink */
if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
return -EPERM;
}
	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter, which is reset according to
	 *   each request.
	 */
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
new_cap_mask = ((__be32 *) inbox->buf)[2];
} else {
reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
new_cap_mask = ((__be32 *) inbox->buf)[1];
}
/* slave may not set the IS_SM capability for the port */
if (slave != mlx4_master_func_num(dev) &&
(be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
return -EINVAL;
/* No DEV_MGMT in multifunc mode */
if (mlx4_is_mfunc(dev) &&
(be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
return -EINVAL;
agg_cap_mask = 0;
slave_cap_mask =
priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
for (i = 0; i < dev->num_slaves; i++)
agg_cap_mask |=
priv->mfunc.master.slave_state[i].ib_cap_mask[port];
/* only clear mailbox for guests. Master may be setting
* MTU or PKEY table size
*/
if (slave != dev->caps.function)
memset(inbox->buf, 0, 256);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
((__be32 *) inbox->buf)[2] = agg_cap_mask;
} else {
((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
((__be32 *) inbox->buf)[1] = agg_cap_mask;
}
err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
slave_cap_mask;
return err;
}
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier & 0xFF);
if (port < 0)
return -EINVAL;
vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
(port & 0xFF);
return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
vhcr->op_modifier, inbox);
}
/* bit locations for set port command with zero op modifier */
enum {
MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
MLX4_CHANGE_PORT_VL_CAP = 21,
MLX4_CHANGE_PORT_MTU_CAP = 22,
};
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
struct mlx4_cmd_mailbox *mailbox;
int err, vl_cap, pkey_tbl_flag = 0;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
pkey_tbl_flag = 1;
((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
}
/* IB VL CAP enum isn't used by the firmware, just numerical values */
for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
((__be32 *) mailbox->buf)[0] = cpu_to_be32(
(1 << MLX4_CHANGE_PORT_MTU_CAP) |
(1 << MLX4_CHANGE_PORT_VL_CAP) |
(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
(vl_cap << MLX4_SET_PORT_VL_CAP));
err = mlx4_cmd(dev, mailbox->dma, port,
MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
if (err != -ENOMEM)
break;
}
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
#define SET_PORT_ROCE_2_FLAGS 0x10
#define MLX4_SET_PORT_ROCE_V1_V2 0x2
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
int err;
u32 in_mod;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->flags = SET_PORT_GEN_ALL_VALID;
context->mtu = cpu_to_be16(mtu);
context->pptx = (pptx * (!pfctx)) << 7;
context->pfctx = pfctx;
context->pprx = (pprx * (!pfcrx)) << 7;
context->pfcrx = pfcrx;
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
context->flags |= SET_PORT_ROCE_2_FLAGS;
context->roce_mode |=
MLX4_SET_PORT_ROCE_V1_V2 << 4;
}
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);
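/* Usage sketch (hypothetical caller): an Ethernet driver would typically
 * program the effective port MTU together with the pause/PFC settings in
 * one call, e.g.
 *
 *	err = mlx4_SET_PORT_general(dev, port,
 *				    mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
 *				    tx_pause, tx_ppp, rx_pause, rx_ppp);
 *
 * Note that the context encoding above clears pptx/pprx whenever the
 * corresponding PFC bitmap (pfctx/pfcrx) is non-zero.
 */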
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->base_qpn = cpu_to_be32(base_qpn);
context->n_mac = dev->caps.log_num_macs;
context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
base_qpn);
context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
context->vlan_miss = MLX4_VLAN_MISS_IDX;
in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
context->user_mtu = cpu_to_be16(user_mtu);
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK;
memcpy(context->user_mac, user_mac, sizeof(context->user_mac));
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_user_mac);
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_general_context *context;
u32 in_mod;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
if (ignore_fcs_value)
context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
else
context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
enum {
VXLAN_ENABLE_MODIFY = 1 << 7,
VXLAN_STEERING_MODIFY = 1 << 6,
VXLAN_ENABLE = 1 << 7,
};
struct mlx4_set_port_vxlan_context {
u32 reserved1;
u8 modify_flags;
u8 reserved2;
u8 enable_flags;
u8 steering;
};
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
{
int err;
u32 in_mod;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_set_port_vxlan_context *context;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
memset(context, 0, sizeof(*context));
context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
if (enable)
context->enable_flags = VXLAN_ENABLE;
context->steering = steering;
in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
{
int err;
struct mlx4_cmd_mailbox *mailbox;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
*((__be32 *)mailbox->buf) = cpu_to_be32(time);
err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err = 0;
return err;
}
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
u64 mac, u64 clear, u8 mode)
{
return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
int err = 0;
return err;
}
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
int *slave_id)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i, found_ix = -1;
int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
struct mlx4_slaves_pport slaves_pport;
unsigned num_vfs;
int slave_gid;
if (!mlx4_is_mfunc(dev))
return -EINVAL;
slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
num_vfs = bitmap_weight(slaves_pport.slaves,
dev->persist->num_vfs + 1) - 1;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
MLX4_ROCE_GID_ENTRY_SIZE)) {
found_ix = i;
break;
}
}
if (found_ix >= 0) {
/* Calculate a slave_gid which is the slave number in the gid
* table and not a globally unique slave number.
*/
if (found_ix < MLX4_ROCE_PF_GIDS)
slave_gid = 0;
else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
(vf_gids / num_vfs + 1))
slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
(vf_gids / num_vfs + 1)) + 1;
else
slave_gid =
((found_ix - MLX4_ROCE_PF_GIDS -
((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
(vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
/* Calculate the globally unique slave id */
if (slave_gid) {
struct mlx4_active_ports exclusive_ports;
struct mlx4_active_ports actv_ports;
struct mlx4_slaves_pport slaves_pport_actv;
unsigned max_port_p_one;
int num_vfs_before = 0;
int candidate_slave_gid;
			/* Calculate how many VFs are on the previous ports, if any exist */
for (i = 1; i < port; i++) {
bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
set_bit(i - 1, exclusive_ports.ports);
slaves_pport_actv =
mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
num_vfs_before += bitmap_weight(
slaves_pport_actv.slaves,
dev->persist->num_vfs + 1);
}
/* candidate_slave_gid isn't necessarily the correct slave, but
* it has the same number of ports and is assigned to the same
* ports as the real slave we're looking for. On dual port VF,
* slave_gid = [single port VFs on port <port>] +
* [offset of the current slave from the first dual port VF] +
* 1 (for the PF).
*/
candidate_slave_gid = slave_gid + num_vfs_before;
actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
max_port_p_one = find_first_bit(
actv_ports.ports, dev->caps.num_ports) +
bitmap_weight(actv_ports.ports,
dev->caps.num_ports) + 1;
/* Calculate the real slave number */
for (i = 1; i < max_port_p_one; i++) {
if (i == port)
continue;
bitmap_zero(exclusive_ports.ports,
dev->caps.num_ports);
set_bit(i - 1, exclusive_ports.ports);
slaves_pport_actv =
mlx4_phys_to_slaves_pport_actv(
dev, &exclusive_ports);
slave_gid += bitmap_weight(
slaves_pport_actv.slaves,
dev->persist->num_vfs + 1);
}
}
*slave_id = slave_gid;
}
return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
u8 *gid)
{
struct mlx4_priv *priv = mlx4_priv(dev);
if (!mlx4_is_master(dev))
return -EINVAL;
memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
MLX4_ROCE_GID_ENTRY_SIZE);
return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
u8 i2c_addr;
u8 page_num;
__be16 dev_mem_address;
__be16 reserved1;
__be16 size;
__be32 reserved2[2];
u8 data[MODULE_INFO_MAX_READ];
};
enum cable_info_err {
CABLE_INF_INV_PORT = 0x1,
CABLE_INF_OP_NOSUP = 0x2,
CABLE_INF_NOT_CONN = 0x3,
CABLE_INF_NO_EEPRM = 0x4,
CABLE_INF_PAGE_ERR = 0x5,
CABLE_INF_INV_ADDR = 0x6,
CABLE_INF_I2C_ADDR = 0x7,
CABLE_INF_QSFP_VIO = 0x8,
CABLE_INF_I2C_BUSY = 0x9,
};
#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
static inline const char *cable_info_mad_err_str(u16 mad_status)
{
u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
switch (err) {
case CABLE_INF_INV_PORT:
return "invalid port selected";
case CABLE_INF_OP_NOSUP:
return "operation not supported for this port (the port is of type CX4 or internal)";
case CABLE_INF_NOT_CONN:
return "cable is not connected";
case CABLE_INF_NO_EEPRM:
return "the connected cable has no EPROM (passive copper cable)";
case CABLE_INF_PAGE_ERR:
return "page number is greater than 15";
case CABLE_INF_INV_ADDR:
return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
case CABLE_INF_I2C_ADDR:
return "invalid I2C slave address";
case CABLE_INF_QSFP_VIO:
return "at least one cable violates the QSFP specification and ignores the modsel signal";
case CABLE_INF_I2C_BUSY:
return "I2C bus is constantly busy";
}
return "Unknown Error";
}
static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
{
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
int ret;
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
return PTR_ERR(outbox);
}
inmad = (struct mlx4_mad_ifc *)(inbox->buf);
outmad = (struct mlx4_mad_ifc *)(outbox->buf);
inmad->method = 0x1; /* Get */
inmad->class_version = 0x1;
inmad->mgmt_class = 0x1;
inmad->base_version = 0x1;
inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = 0;
cable_info->page_num = 0;
cable_info->i2c_addr = I2C_ADDR_LOW;
cable_info->size = cpu_to_be16(1);
ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (ret)
goto out;
if (be16_to_cpu(outmad->status)) {
/* Mad returned with bad status */
ret = be16_to_cpu(outmad->status);
mlx4_warn(dev,
"MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
cable_info_mad_err_str(ret));
ret = -ret;
goto out;
}
cable_info = (struct mlx4_cable_info *)outmad->data;
*module_id = cable_info->data[0];
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return ret;
}
static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
{
*i2c_addr = I2C_ADDR_LOW;
*page_num = 0;
if (*offset < I2C_PAGE_SIZE)
return;
*i2c_addr = I2C_ADDR_HIGH;
*offset -= I2C_PAGE_SIZE;
}
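/* SFP addressing example for the helper above: a linear offset of 300
 * falls past the first 256-byte page, so it is read from i2c address
 * I2C_ADDR_HIGH (0x51) at device address 300 - 256 = 44.
 */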
static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
{
/* Offsets 0-255 belong to page 0.
* Offsets 256-639 belong to pages 01, 02, 03.
* For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
*/
if (*offset < I2C_PAGE_SIZE)
*page_num = 0;
else
*page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
*i2c_addr = I2C_ADDR_LOW;
*offset -= *page_num * I2C_HIGH_PAGE_SIZE;
}
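/* Completing the QSFP example from the comment above: offset 400 maps to
 * page 2, and the device address passed down becomes
 * 400 - 2 * I2C_HIGH_PAGE_SIZE = 144.
 */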
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
* @port: port number.
* @offset: byte offset in eeprom to start reading data from.
* @size: num of bytes to read.
* @data: output buffer to put the requested data into.
*
 * Reads cable module eeprom data and stores it in the @data output
 * buffer.
 * Returns the number of bytes read on success or a negative error
 * code.
*/
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
u16 offset, u16 size, u8 *data)
{
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
ret = mlx4_get_module_id(dev, port, &module_id);
if (ret)
return ret;
switch (module_id) {
case MLX4_MODULE_ID_SFP:
mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
break;
case MLX4_MODULE_ID_QSFP:
case MLX4_MODULE_ID_QSFP_PLUS:
case MLX4_MODULE_ID_QSFP28:
mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
break;
default:
mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
return -EINVAL;
}
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
outbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(outbox)) {
mlx4_free_cmd_mailbox(dev, inbox);
return PTR_ERR(outbox);
}
inmad = (struct mlx4_mad_ifc *)(inbox->buf);
outmad = (struct mlx4_mad_ifc *)(outbox->buf);
inmad->method = 0x1; /* Get */
inmad->class_version = 0x1;
inmad->mgmt_class = 0x1;
inmad->base_version = 0x1;
inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross-page reads are not allowed;
		 * read only up to offset 256 in the low page.
		 */
size -= offset + size - I2C_PAGE_SIZE;
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
MLX4_CMD_NATIVE);
if (ret)
goto out;
if (be16_to_cpu(outmad->status)) {
/* Mad returned with bad status */
ret = be16_to_cpu(outmad->status);
mlx4_warn(dev,
"MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
0xFF60, port, i2c_addr, offset, size,
ret, cable_info_mad_err_str(ret));
if (i2c_addr == I2C_ADDR_HIGH &&
MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
/* Some SFP cables do not support i2c slave
* address 0x51 (high page), abort silently.
*/
ret = 0;
else
ret = -ret;
goto out;
}
cable_info = (struct mlx4_cable_info *)outmad->data;
memcpy(data, cable_info->data, size);
ret = size;
out:
mlx4_free_cmd_mailbox(dev, inbox);
mlx4_free_cmd_mailbox(dev, outbox);
return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
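/* Usage sketch (hypothetical caller): each call reads at most
 * MODULE_INFO_MAX_READ bytes and never crosses the low/high page boundary,
 * so callers are expected to loop on the returned byte count:
 *
 *	int off = 0;
 *	while (off < len) {
 *		int n = mlx4_get_module_info(dev, port, start + off,
 *					     len - off, buf + off);
 *		if (n <= 0)
 *			return n ? n : -EIO;
 *		off += n;
 *	}
 */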
int mlx4_max_tc(struct mlx4_dev *dev)
{
u8 num_tc = dev->caps.max_tc_eth;
if (!num_tc)
num_tc = MLX4_TC_MAX_NUMBER;
return num_tc;
}
EXPORT_SYMBOL(mlx4_max_tc);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/port.c
|
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
#include "mlx4.h"
#include "icm.h"
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_CQ_FLAG_CC ( 1 << 18)
#define MLX4_CQ_FLAG_OI ( 1 << 17)
#define MLX4_CQ_STATE_ARMED ( 9 << 8)
#define MLX4_CQ_STATE_ARMED_SOL ( 6 << 8)
#define MLX4_EQ_STATE_FIRED (10 << 8)
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx4_cq *mcq, *temp;
spin_lock_irqsave(&ctx->lock, flags);
list_splice_tail_init(&ctx->list, &ctx->process_list);
spin_unlock_irqrestore(&ctx->lock, flags);
list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
}
if (!list_empty(&ctx->process_list))
tasklet_schedule(&ctx->task);
}
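/* Note on the loop above: completions are processed for at most
 * TASKLET_MAX_TIME (2 ms worth of jiffies) per tasklet run; anything still
 * on process_list is handled by rescheduling the tasklet, which bounds the
 * time spent in a single softirq invocation.
 */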
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
unsigned long flags;
bool kick;
spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When migration of CQs between EQs is implemented, note that this
	 * point will need synchronization: while a CQ is being migrated,
	 * completions could still arrive on the old EQ.
	 */
if (list_empty_careful(&cq->tasklet_ctx.list)) {
refcount_inc(&cq->refcount);
kick = list_empty(&tasklet_ctx->list);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
if (kick)
tasklet_schedule(&tasklet_ctx->task);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
rcu_read_unlock();
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
	/* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;
cq->comp(cq);
}
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
rcu_read_unlock();
if (!cq) {
mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
	/* Accessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u8 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
u16 count, u16 period)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
cq_context->cq_max_count = cpu_to_be16(count);
cq_context->cq_period = cpu_to_be16(period);
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
int entries, struct mlx4_mtt *mtt)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
u64 mtt_addr;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
cq_context = mailbox->buf;
cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
int err;
*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
if (*cqn == -1)
return -ENOMEM;
err = mlx4_table_get(dev, &cq_table->table, *cqn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
if (err)
goto err_put;
return 0;
err_put:
mlx4_table_put(dev, &cq_table->table, *cqn);
err_out:
mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
return err;
}
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)
{
u32 in_modifier = RES_CQ | (((u32)usage & 3) << 30);
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*cqn = get_param_l(&out_param);
		return 0;
}
return __mlx4_cq_alloc_icm(dev, cqn);
}
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
mlx4_table_put(dev, &cq_table->table, cqn);
mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, cqn);
err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
} else
__mlx4_cq_free_icm(dev, cqn);
}
static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
{
int entries_per_copy = PAGE_SIZE / cqe_size;
void *init_ents;
int err = 0;
int i;
init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!init_ents)
return -ENOMEM;
/* Populate a list of CQ entries to reduce the number of
* copy_to_user calls. 0xcc is the initialization value
* required by the FW.
*/
memset(init_ents, 0xcc, PAGE_SIZE);
if (entries_per_copy < entries) {
for (i = 0; i < entries / entries_per_copy; i++) {
err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
-EFAULT : 0;
if (err)
goto out;
buf += PAGE_SIZE;
}
} else {
err = copy_to_user((void __user *)buf, init_ents,
array_size(entries, cqe_size)) ?
-EFAULT : 0;
}
out:
kfree(init_ents);
return err;
}
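/* Illustrative note (not part of the original driver): the helper above
 * initializes user CQ memory one page at a time.  A worked example of the
 * chunking, assuming PAGE_SIZE = 4096 and the common 32-byte CQE:
 *
 *	entries_per_copy = 4096 / 32 = 128
 *	entries = 512  ->  4 copy_to_user() calls of one page each
 *	entries = 64   ->  a single copy_to_user() of 64 * 32 = 2048 bytes
 *
 * Because CQ sizes and PAGE_SIZE / cqe_size are powers of two, the page-sized
 * copies always divide the buffer evenly.
 */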
static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
int entries,
int cqe_size)
{
int i;
if (buf->nbufs == 1)
memset(buf->direct.buf, 0xcc, entries * cqe_size);
else
for (i = 0; i < buf->npages; i++)
memset(buf->page_list[i].buf, 0xcc,
1UL << buf->page_shift);
}
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
struct mlx4_cq *cq, unsigned vector, int collapsed,
int timestamp_en, void *buf_addr, bool user_cq)
{
bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_cq_context *cq_context;
u64 mtt_addr;
int err;
if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
if (err)
return err;
spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_radix;
}
cq_context = mailbox->buf;
cq_context->flags = cpu_to_be32(!!collapsed << 18);
if (timestamp_en)
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage =
cpu_to_be32((ilog2(nent) << 24) |
mlx4_to_hw_uar_index(dev, uar->index));
cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
cq_context->mtt_base_addr_h = mtt_addr >> 32;
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
cq_context->db_rec_addr = cpu_to_be64(db_rec);
if (sw_cq_init) {
if (user_cq) {
err = mlx4_init_user_cqes(buf_addr, nent,
dev->caps.cqe_size);
if (err)
sw_cq_init = false;
} else {
mlx4_init_kernel_cqes(buf_addr, nent,
dev->caps.cqe_size);
}
}
err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
cq->cons_index = 0;
cq->arm_sn = 1;
cq->uar = uar;
refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
int err;
err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock(&cq_table->lock);
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
return mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
}
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
if (mlx4_is_slave(dev))
return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/cq.c
|
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
}
if (out_param > 2) {
mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
return -EINVAL;
}
*type = out_param;
return 0;
}
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults)
{
struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
int err;
int i;
for (i = 1; i <= dev->caps.num_ports; i++) {
stype[i - 1] = 0;
if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
if (err)
stype[i - 1] = defaults[i - 1];
} else
stype[i - 1] = defaults[i - 1];
}
/*
* If sensed nothing, remain in current configuration.
*/
for (i = 0; i < dev->caps.num_ports; i++)
stype[i] = stype[i] ? stype[i] : defaults[i];
}
static void mlx4_sense_port(struct work_struct *work)
{
struct delayed_work *delay = to_delayed_work(work);
struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
sense_poll);
struct mlx4_dev *dev = sense->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
enum mlx4_port_type stype[MLX4_MAX_PORTS];
mutex_lock(&priv->port_mutex);
mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);
if (mlx4_check_port_params(dev, stype))
goto sense_again;
if (mlx4_change_port_types(dev, stype))
mlx4_err(dev, "Failed to change port_types\n");
sense_again:
mutex_unlock(&priv->port_mutex);
	queue_delayed_work(mlx4_wq, &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_start_sense(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
return;
	queue_delayed_work(mlx4_wq, &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_stop_sense(struct mlx4_dev *dev)
{
cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}
void mlx4_sense_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
int port;
sense->dev = dev;
for (port = 1; port <= dev->caps.num_ports; port++)
sense->do_sense_port[port] = 1;
INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/sense.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
#include <asm/page.h>
#include "mlx4.h"
#include "icm.h"
enum {
MLX4_NUM_RESERVED_UARS = 8
};
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
*pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
if (*pdn == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn)
{
mlx4_bitmap_free(&mlx4_priv(dev)->pd_bitmap, pdn, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_pd_free);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
*xrcdn = mlx4_bitmap_alloc(&priv->xrcd_bitmap);
if (*xrcdn == -1)
return -ENOMEM;
return 0;
}
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn)
{
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param,
RES_XRCD, RES_OP_RESERVE,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
return err;
*xrcdn = get_param_l(&out_param);
return 0;
}
return __mlx4_xrcd_alloc(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_alloc);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
mlx4_bitmap_free(&mlx4_priv(dev)->xrcd_bitmap, xrcdn, MLX4_USE_RR);
}
void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, xrcdn);
err = mlx4_cmd(dev, in_param, RES_XRCD,
RES_OP_RESERVE, MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
mlx4_warn(dev, "Failed to release xrcdn %d\n", xrcdn);
} else
__mlx4_xrcd_free(dev, xrcdn);
}
EXPORT_SYMBOL_GPL(mlx4_xrcd_free);
int mlx4_init_pd_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
(1 << NOT_MASKED_PD_BITS) - 1,
dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->pd_bitmap);
}
int mlx4_init_xrcd_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->xrcd_bitmap, (1 << 16),
(1 << 16) - 1, dev->caps.reserved_xrcds + 1, 0);
}
void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->xrcd_bitmap);
}
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
int offset;
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
if (mlx4_is_slave(dev))
offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
2) /
dev->caps.uar_page_size);
else
offset = uar->index;
uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
+ offset;
uar->map = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
mlx4_bitmap_free(&mlx4_priv(dev)->uar_table.bitmap, uar->index, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(mlx4_uar_free);
int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_uar *uar;
int err = 0;
int idx;
if (!priv->bf_mapping)
return -ENOMEM;
mutex_lock(&priv->bf_mutex);
if (!list_empty(&priv->bf_list))
uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
else {
if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
err = -ENOMEM;
goto out;
}
uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
if (!uar) {
uar = kmalloc(sizeof(*uar), GFP_KERNEL);
if (!uar) {
err = -ENOMEM;
goto out;
}
}
err = mlx4_uar_alloc(dev, uar);
if (err)
goto free_kmalloc;
uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
if (!uar->map) {
err = -ENOMEM;
goto free_uar;
}
uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
uar->index << PAGE_SHIFT,
PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
			goto unmap_uar;
}
uar->free_bf_bmap = 0;
list_add(&uar->bf_list, &priv->bf_list);
}
idx = ffz(uar->free_bf_bmap);
uar->free_bf_bmap |= 1 << idx;
bf->uar = uar;
bf->offset = 0;
bf->buf_size = dev->caps.bf_reg_size / 2;
bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
list_del_init(&uar->bf_list);
goto out;
unmap_uar:
bf->uar = NULL;
iounmap(uar->map);
free_uar:
mlx4_uar_free(dev, uar);
free_kmalloc:
kfree(uar);
out:
mutex_unlock(&priv->bf_mutex);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int idx;
if (!bf->uar || !bf->uar->bf_map)
return;
mutex_lock(&priv->bf_mutex);
idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
bf->uar->free_bf_bmap &= ~(1 << idx);
if (!bf->uar->free_bf_bmap) {
if (!list_empty(&bf->uar->bf_list))
list_del(&bf->uar->bf_list);
io_mapping_unmap(bf->uar->bf_map);
iounmap(bf->uar->map);
mlx4_uar_free(dev, bf->uar);
kfree(bf->uar);
} else if (list_empty(&bf->uar->bf_list))
list_add(&bf->uar->bf_list, &priv->bf_list);
mutex_unlock(&priv->bf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_bf_free);
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
if (dev->caps.num_uars <= num_reserved_uar) {
		mlx4_err(dev, "Only %d UAR pages (need more than %d)\n",
			 dev->caps.num_uars, num_reserved_uar);
mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
return -ENODEV;
}
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
{
mlx4_bitmap_cleanup(&mlx4_priv(dev)->uar_table.bitmap);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/pd.c
|
/*
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include "mlx4.h"
#include "icm.h"
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
rcu_read_unlock();
	if (!srq) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	refcount_inc(&srq->refcount);
srq->event(srq, event_type);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
}
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
int err;
*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
if (*srqn == -1)
return -ENOMEM;
err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
return 0;
err_put:
mlx4_table_put(dev, &srq_table->table, *srqn);
err_out:
mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
return err;
}
static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
RES_OP_RESERVE_AND_MAP,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
*srqn = get_param_l(&out_param);
return err;
}
return __mlx4_srq_alloc_icm(dev, srqn);
}
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
mlx4_table_put(dev, &srq_table->table, srqn);
mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}
static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, srqn);
if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
return;
}
__mlx4_srq_free_icm(dev, srqn);
}
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_srq_context *srq_context;
u64 mtt_addr;
int err;
err = mlx4_srq_alloc_icm(dev, &srq->srqn);
if (err)
return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_radix;
}
srq_context = mailbox->buf;
srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
srq->srqn);
srq_context->logstride = srq->wqe_shift - 4;
srq_context->xrcd = cpu_to_be16(xrcd);
srq_context->pg_offset_cqn = cpu_to_be32(cqn & 0xffffff);
srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
srq_context->mtt_base_addr_h = mtt_addr >> 32;
srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
srq_context->pd = cpu_to_be32(pdn);
srq_context->db_rec_addr = cpu_to_be64(db_rec);
err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
refcount_set(&srq->refcount, 1);
init_completion(&srq->free);
return 0;
err_radix:
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
err_icm:
mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
int err;
err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
if (err)
mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
wait_for_completion(&srq->free);
mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
{
return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_srq_context *srq_context;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
srq_context = mailbox->buf;
err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
if (err)
goto err_out;
*limit_watermark = be16_to_cpu(srq_context->limit_watermark);
err_out:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
if (mlx4_is_slave(dev))
return 0;
return mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
}
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
if (mlx4_is_slave(dev))
return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
rcu_read_unlock();
return srq;
}
EXPORT_SYMBOL_GPL(mlx4_srq_lookup);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/srq.c
|
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
{
int o;
int m;
u32 seg;
spin_lock(&buddy->lock);
for (o = order; o <= buddy->max_order; ++o)
if (buddy->num_free[o]) {
m = 1 << (buddy->max_order - o);
seg = find_first_bit(buddy->bits[o], m);
if (seg < m)
goto found;
}
spin_unlock(&buddy->lock);
return -1;
found:
clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
spin_unlock(&buddy->lock);
seg <<= order;
return seg;
}
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
seg >>= order;
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
}
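/* Illustrative note (not part of the original driver): a worked example of
 * the buddy split/merge above, assuming max_order = 4 (16 segments) with the
 * whole range initially free (one block at order 4):
 *
 *	mlx4_buddy_alloc(buddy, 1)
 *	    takes the order-4 block, splits it down to order 1 and returns
 *	    segment 0; the buddies left free are segment 2 (order 1),
 *	    segment 4 (order 2) and segment 8 (order 3).
 *
 *	mlx4_buddy_free(buddy, 0, 1)
 *	    finds each of those buddies free in turn and merges back up until
 *	    the single order-4 block is restored.
 *
 * The value returned by mlx4_buddy_alloc() is the first segment of the block,
 * i.e. seg << order after the split loop.
 */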
static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
GFP_KERNEL);
buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
GFP_KERNEL);
if (!buddy->bits || !buddy->num_free)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
if (!buddy->bits[i])
goto err_out_free;
}
set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
kvfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
kfree(buddy->num_free);
return -ENOMEM;
}
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
int i;
for (i = 0; i <= buddy->max_order; ++i)
kvfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
}
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
int seg_order;
u32 offset;
seg_order = max_t(int, order - log_mtts_per_seg, 0);
seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
offset = seg * (1 << log_mtts_per_seg);
if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
offset + (1 << order) - 1)) {
mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
return offset;
}
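/* Illustrative note (not part of the original driver): MTTs are handed out in
 * buddy segments of (1 << log_mtts_per_seg) entries.  Assuming, for example,
 * log_mtts_per_seg = 3 (8 MTTs per segment; it is a module parameter):
 *
 *	order = 5 (32 MTTs)  ->  seg_order = 5 - 3 = 2 (4 segments)
 *	order = 2 (4 MTTs)   ->  seg_order = 0 (still one full 8-MTT segment)
 *
 * The returned offset is seg * 8, i.e. always segment aligned, while
 * mlx4_table_get_range() maps only the ICM range actually covered by the
 * requested order.
 */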
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
u64 in_param = 0;
u64 out_param;
int err;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, order);
err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
RES_OP_RESERVE_AND_MAP,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
return -1;
return get_param_l(&out_param);
}
return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
struct mlx4_mtt *mtt)
{
int i;
if (!npages) {
mtt->order = -1;
mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
return 0;
} else
mtt->page_shift = page_shift;
for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
u32 first_seg;
int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
seg_order = max_t(int, order - log_mtts_per_seg, 0);
first_seg = offset / (1 << log_mtts_per_seg);
mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
offset + (1 << order) - 1);
}
static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
u64 in_param = 0;
int err;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, offset);
set_param_h(&in_param, order);
err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
offset, order);
return;
}
__mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
if (mtt->order < 0)
return;
mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
static u32 hw_index_to_key(u32 ind)
{
return (ind >> 24) | (ind << 8);
}
static u32 key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
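/* Illustrative note (not part of the original driver): the two helpers above
 * are inverse 8-bit rotations of a 32-bit value.  hw_index_to_key() rotates
 * left by 8, key_to_hw_index() rotates right by 8, e.g.:
 *
 *	hw_index_to_key(0x00000012) == 0x00001200
 *	key_to_hw_index(0x00001200) == 0x00000012
 *
 * so the hardware MPT index ends up in the upper 24 bits of the lkey/rkey and
 * the low byte is free to vary between registrations of the same MPT entry.
 */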
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd(dev, mailbox->dma, mpt_index,
0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
!mailbox, MLX4_CMD_HW2SW_MPT,
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry ***mpt_entry)
{
int err;
int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
struct mlx4_cmd_mailbox *mailbox = NULL;
if (mmr->enabled != MLX4_MPT_EN_HW)
return -EINVAL;
err = mlx4_HW2SW_MPT(dev, NULL, key);
if (err) {
mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
return err;
}
mmr->enabled = MLX4_MPT_EN_SW;
if (!mlx4_is_mfunc(dev)) {
**mpt_entry = mlx4_table_find(
&mlx4_priv(dev)->mr_table.dmpt_table,
key, NULL);
} else {
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
0, MLX4_CMD_QUERY_MPT,
MLX4_CMD_TIME_CLASS_B,
MLX4_CMD_WRAPPED);
if (err)
goto free_mailbox;
*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
}
if (!(*mpt_entry) || !(**mpt_entry)) {
err = -ENOMEM;
goto free_mailbox;
}
return 0;
free_mailbox:
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
struct mlx4_mpt_entry **mpt_entry)
{
int err;
if (!mlx4_is_mfunc(dev)) {
/* Make sure any changes to this entry are flushed */
wmb();
*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;
/* Make sure the new status is written */
wmb();
err = mlx4_SYNC_TPT(dev);
} else {
int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
struct mlx4_cmd_mailbox *mailbox =
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
buf);
(*mpt_entry)->lkey = 0;
err = mlx4_SW2HW_MPT(dev, mailbox, key);
}
if (!err) {
mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
mmr->enabled = MLX4_MPT_EN_HW;
}
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
struct mlx4_mpt_entry **mpt_entry)
{
if (mlx4_is_mfunc(dev)) {
struct mlx4_cmd_mailbox *mailbox =
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
buf);
mlx4_free_cmd_mailbox(dev, mailbox);
}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
u32 pdn)
{
u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
/* The wrapper function will put the slave's id here */
if (mlx4_is_mfunc(dev))
pd_flags &= ~MLX4_MPT_PD_VF_MASK;
mpt_entry->pd_flags = cpu_to_be32(pd_flags |
(pdn & MLX4_MPT_PD_MASK)
| MLX4_MPT_PD_FLAG_EN_INV);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);
int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
struct mlx4_mpt_entry *mpt_entry,
u32 access)
{
u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
(access & MLX4_PERM_MASK);
mpt_entry->flags = cpu_to_be32(flags);
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);
static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
u64 iova, u64 size, u32 access, int npages,
int page_shift, struct mlx4_mr *mr)
{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
mr->enabled = MLX4_MPT_DISABLED;
mr->key = hw_index_to_key(mridx);
return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}
static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *mailbox,
int num_entries)
{
return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}
static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
u64 out_param;
if (mlx4_is_mfunc(dev)) {
if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
return -1;
return get_param_l(&out_param);
}
return __mlx4_mpt_reserve(dev);
}
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
struct mlx4_priv *priv = mlx4_priv(dev);
mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}
static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, index);
if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
mlx4_warn(dev, "Failed to release mr index:%d\n",
index);
return;
}
__mlx4_mpt_release(dev, index);
}
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param = 0;
if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
MLX4_CMD_ALLOC_RES,
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
mlx4_table_put(dev, &mr_table->dmpt_table, index);
}
static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
u64 in_param = 0;
if (mlx4_is_mfunc(dev)) {
set_param_l(&in_param, index);
if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED))
mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
index);
return;
}
return __mlx4_mpt_free_icm(dev, index);
}
int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
int npages, int page_shift, struct mlx4_mr *mr)
{
u32 index;
int err;
index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
access, npages, page_shift, mr);
if (err)
mlx4_mpt_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int err;
if (mr->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err) {
mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
err);
return err;
}
mr->enabled = MLX4_MPT_EN_SW;
}
mlx4_mtt_cleanup(dev, &mr->mtt);
return 0;
}
int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
int ret;
ret = mlx4_mr_free_reserved(dev, mr);
if (ret)
return ret;
if (mr->enabled)
mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
mlx4_mpt_release(dev, key_to_hw_index(mr->key));
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mtt_cleanup(dev, &mr->mtt);
mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
u64 iova, u64 size, int npages,
int page_shift, struct mlx4_mpt_entry *mpt_entry)
{
int err;
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(size);
mpt_entry->entity_size = cpu_to_be32(page_shift);
mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
MLX4_MPT_FLAG_SW_OWNS));
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_addr = 0;
} else {
mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
&mr->mtt));
if (mr->mtt.page_shift == 0)
mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
/* fast register MR in free state */
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
mr->enabled = MLX4_MPT_EN_SW;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_table;
}
mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
MLX4_MPT_FLAG_REGION |
mr->access);
mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->start = cpu_to_be64(mr->iova);
mpt_entry->length = cpu_to_be64(mr->size);
mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
mpt_entry->mtt_addr = 0;
} else {
mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
&mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
/* fast register MR in free state */
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
if (err) {
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
mr->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
return 0;
err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list)
{
struct mlx4_priv *priv = mlx4_priv(dev);
__be64 *mtts;
dma_addr_t dma_handle;
int i;
mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
start_index, &dma_handle);
if (!mtts)
return -ENOMEM;
dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
npages * sizeof(u64), DMA_TO_DEVICE);
return 0;
}
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list)
{
int err = 0;
int chunk;
int mtts_per_page;
int max_mtts_first_page;
	/* compute how many mtts fit in the first page */
mtts_per_page = PAGE_SIZE / sizeof(u64);
max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
% mtts_per_page;
chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
npages -= chunk;
start_index += chunk;
page_list += chunk;
chunk = min_t(int, mtts_per_page, npages);
}
return err;
}
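/* Illustrative note (not part of the original driver): a worked example of
 * the chunking above, assuming PAGE_SIZE = 4096:
 *
 *	mtts_per_page = 4096 / sizeof(u64) = 512
 *	mtt->offset + start_index = 1000  ->  1000 % 512 = 488
 *	max_mtts_first_page = 512 - 488 = 24
 *
 * so for npages = 100 the first mlx4_write_mtt_chunk() call writes 24 MTTs
 * (up to the ICM page boundary) and the second writes the remaining 76;
 * later chunks, if any, would be full 512-entry pages.
 */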
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list)
{
struct mlx4_cmd_mailbox *mailbox = NULL;
__be64 *inbox = NULL;
int chunk;
int err = 0;
int i;
if (mtt->order < 0)
return -EINVAL;
if (mlx4_is_mfunc(dev)) {
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
while (npages > 0) {
chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
npages);
inbox[0] = cpu_to_be64(mtt->offset + start_index);
inbox[1] = 0;
for (i = 0; i < chunk; ++i)
inbox[i + 2] = cpu_to_be64(page_list[i] |
MLX4_MTT_FLAG_PRESENT);
err = mlx4_WRITE_MTT(dev, mailbox, chunk);
if (err) {
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
npages -= chunk;
start_index += chunk;
page_list += chunk;
}
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
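/* Illustrative note (not part of the original driver): in the multi-function
 * (SR-IOV) path above each WRITE_MTT command carries the MTT entries in a
 * command mailbox.  Assuming the usual MLX4_MAILBOX_SIZE of 4096 bytes:
 *
 *	chunk = 4096 / sizeof(u64) - 2 = 510 entries per command
 *
 * where inbox[0] holds the starting MTT offset and inbox[1] is reserved, so a
 * 2000-page registration is pushed to the PF in four WRITE_MTT commands
 * (510 + 510 + 510 + 470).
 */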
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf)
{
u64 *page_list;
int err;
int i;
page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
for (i = 0; i < buf->npages; ++i)
if (buf->nbufs == 1)
page_list[i] = buf->direct.map + (i << buf->page_shift);
else
page_list[i] = buf->page_list[i].map;
err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
kfree(page_list);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
struct mlx4_mw *mw)
{
u32 index;
if ((type == MLX4_MW_TYPE_1 &&
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
(type == MLX4_MW_TYPE_2 &&
!(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
return -EOPNOTSUPP;
index = mlx4_mpt_reserve(dev);
if (index == -1)
return -ENOMEM;
mw->key = hw_index_to_key(index);
mw->pd = pd;
mw->type = type;
mw->enabled = MLX4_MPT_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
if (err)
return err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_table;
}
mpt_entry = mailbox->buf;
/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
* off, thus creating a memory window and not a memory region.
*/
mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
mpt_entry->pd_flags = cpu_to_be32(mw->pd);
if (mw->type == MLX4_MW_TYPE_2) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
}
err = mlx4_SW2HW_MPT(dev, mailbox,
key_to_hw_index(mw->key) &
(dev->caps.num_mpts - 1));
if (err) {
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
mw->enabled = MLX4_MPT_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
return 0;
err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mw_enable);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
int err;
if (mw->enabled == MLX4_MPT_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mw->key) &
(dev->caps.num_mpts - 1));
if (err)
mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
mw->enabled = MLX4_MPT_EN_SW;
}
if (mw->enabled)
mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
/* Nothing to do for slaves - all MR handling is forwarded
* to the master */
if (mlx4_is_slave(dev))
return 0;
if (!is_power_of_2(dev->caps.num_mpts))
return -EINVAL;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
ilog2((u32)dev->caps.num_mtts /
(1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
priv->reserved_mtts =
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
}
}
return 0;
err_reserve_mtts:
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
err_buddy:
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
return err;
}
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_mr_table *mr_table = &priv->mr_table;
if (mlx4_is_slave(dev))
return;
if (priv->reserved_mtts >= 0)
mlx4_free_mtt_range(dev, priv->reserved_mtts,
fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
|
linux-master
|
drivers/net/ethernet/mellanox/mlx4/mr.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
* Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
};
enum {
MLX5_EQ_STATE_ARMED = 0x9,
MLX5_EQ_STATE_FIRED = 0xa,
MLX5_EQ_STATE_ALWAYS_ARMED = 0xb,
};
enum {
MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
/* The budget must be smaller than MLX5_NUM_SPARE_EQE to guarantee that the
 * consumer index is updated before all the entries in the EQ have been
 * polled. MLX5_NUM_SPARE_EQE is also added when sizing the EQ, so the budget
 * is smaller than the EQ size as well.
 */
enum {
MLX5_EQ_POLLING_BUDGET = 128,
};
static_assert(MLX5_EQ_POLLING_BUDGET <= MLX5_NUM_SPARE_EQE);
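/* Illustrative note (not part of this file originally): the spare-EQE
 * headroom is what lets the handler post a single CI update per poll.
 * Assuming MLX5_NUM_SPARE_EQE is 0x80 (128) as in current headers, an EQ
 * created with param->nent = 64 gets
 *
 *	log_eq_size = order_base_2(64 + 128) = 8   ->  256 entries
 *
 * so even a full polling budget of 128 EQEs leaves at least 128 free slots
 * before the consumer index must be updated.
 */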
struct mlx5_eq_table {
struct xarray comp_eqs;
struct mlx5_eq_async pages_eq;
struct mlx5_eq_async cmd_eq;
struct mlx5_eq_async async_eq;
struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];
/* Since CQ DB is stored in async_eq */
struct mlx5_nb cq_err_nb;
struct mutex lock; /* sync async eqs creations */
struct mutex comp_lock; /* sync comp eqs creations */
int curr_comp_eqs;
int max_comp_eqs;
struct mlx5_irq_table *irq_table;
struct xarray comp_irqs;
struct mlx5_irq *ctrl_irq;
struct cpu_rmap *rmap;
struct cpumask used_cpus;
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
(1ull << MLX5_EVENT_TYPE_COMM_EST) | \
(1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
(1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
(1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
(1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};
MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
MLX5_SET(destroy_eq_in, in, eq_number, eqn);
return mlx5_cmd_exec_in(dev, destroy_eq, in);
}
/* caller must eventually call mlx5_cq_put on the returned cq */
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *cq = NULL;
rcu_read_lock();
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
mlx5_cq_hold(cq);
rcu_read_unlock();
return cq;
}
static int mlx5_eq_comp_int(struct notifier_block *nb,
__always_unused unsigned long action,
__always_unused void *data)
{
struct mlx5_eq_comp *eq_comp =
container_of(nb, struct mlx5_eq_comp, irq_nb);
struct mlx5_eq *eq = &eq_comp->core;
struct mlx5_eqe *eqe;
int num_eqes = 0;
u32 cqn = -1;
eqe = next_eqe_sw(eq);
if (!eqe)
goto out;
do {
struct mlx5_core_cq *cq;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
/* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
cq = mlx5_eq_cq_get(eq, cqn);
if (likely(cq)) {
++cq->arm_sn;
cq->comp(cq, eqe);
mlx5_cq_put(cq);
} else {
dev_dbg_ratelimited(eq->dev->device,
"Completion event for bogus CQ 0x%x\n", cqn);
}
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
out:
eq_update_ci(eq, 1);
if (cqn != -1)
tasklet_schedule(&eq_comp->tasklet_ctx.task);
return 0;
}
/* Some architectures don't latch interrupts when they are disabled, so using
* mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
* avoid losing them. It is not recommended to use it, unless this is the last
* resort.
*/
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
u32 count_eqe;
disable_irq(eq->core.irqn);
count_eqe = eq->core.cons_index;
mlx5_eq_comp_int(&eq->irq_nb, 0, NULL);
count_eqe = eq->core.cons_index - count_eqe;
enable_irq(eq->core.irqn);
return count_eqe;
}
static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
unsigned long *flags)
__acquires(&eq->lock)
{
if (!recovery)
spin_lock(&eq->lock);
else
spin_lock_irqsave(&eq->lock, *flags);
}
static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
unsigned long *flags)
__releases(&eq->lock)
{
if (!recovery)
spin_unlock(&eq->lock);
else
spin_unlock_irqrestore(&eq->lock, *flags);
}
enum async_eq_nb_action {
ASYNC_EQ_IRQ_HANDLER = 0,
ASYNC_EQ_RECOVER = 1,
};
static int mlx5_eq_async_int(struct notifier_block *nb,
unsigned long action, void *data)
{
struct mlx5_eq_async *eq_async =
container_of(nb, struct mlx5_eq_async, irq_nb);
struct mlx5_eq *eq = &eq_async->core;
struct mlx5_eq_table *eqt;
struct mlx5_core_dev *dev;
struct mlx5_eqe *eqe;
unsigned long flags;
int num_eqes = 0;
bool recovery;
dev = eq->dev;
eqt = dev->priv.eq_table;
recovery = action == ASYNC_EQ_RECOVER;
mlx5_eq_async_int_lock(eq_async, recovery, &flags);
eqe = next_eqe_sw(eq);
if (!eqe)
goto out;
do {
/*
* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
} while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
out:
eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(recovery) ? num_eqes : 0;
}
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{
struct mlx5_eq_async *eq = &dev->priv.eq_table->cmd_eq;
int eqes;
eqes = mlx5_eq_async_int(&eq->irq_nb, ASYNC_EQ_RECOVER, NULL);
if (eqes)
mlx5_core_warn(dev, "Recovered %d EQEs on cmd_eq\n", eqes);
}
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
int i;
for (i = 0; i < eq_get_size(eq); i++) {
eqe = get_eqe(eq, i);
eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
}
}
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct mlx5_eq_param *param)
{
u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
__be64 *pas;
u16 vecidx;
void *eqc;
int inlen;
u32 *in;
int err;
int i;
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
eq->cons_index = 0;
err = mlx5_frag_buf_alloc_node(dev, wq_get_byte_sz(log_eq_size, log_eq_stride),
&eq->frag_buf, dev->priv.numa_node);
if (err)
return err;
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
eq->irq = param->irq;
vecidx = mlx5_irq_get_index(eq->irq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_buf;
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
mlx5_fill_page_frag_array(&eq->frag_buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx))
MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID);
for (i = 0; i < 4; i++)
MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i,
param->mask[i]);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, eq->fbc.log_sz);
MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
MLX5_SET(eqc, eqc, intr, vecidx);
MLX5_SET(eqc, eqc, log_page_size,
eq->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (err)
goto err_in;
eq->vecidx = vecidx;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = mlx5_debug_eq_add(dev, eq);
if (err)
goto err_eq;
kvfree(in);
return 0;
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
err_in:
kvfree(in);
err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
/**
 * mlx5_eq_enable - Enable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to enable
 * @nb: Notifier call block
 *
 * Must be called after EQ is created in device.
 *
 * Return: 0 if no error
 */
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
int err;
err = mlx5_irq_attach_nb(eq->irq, nb);
if (!err)
eq_update_ci(eq, 1);
return err;
}
EXPORT_SYMBOL(mlx5_eq_enable);
/**
 * mlx5_eq_disable - Disable EQ for receiving EQEs
 * @dev: Device which owns the eq
 * @eq: EQ to disable
 * @nb: Notifier call block
 *
 * Must be called before EQ is destroyed.
 */
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
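/* Illustrative sketch, not part of the upstream driver: an EQ owner pairs
 * mlx5_eq_enable() with mlx5_eq_disable() around the EQ's lifetime, passing
 * the same notifier block to both. The example_* names are hypothetical.
 */
#if 0	/* illustrative example only */
static int example_eq_handler(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	/* data points to the mlx5_eqe that was consumed from the EQ */
	return NOTIFY_OK;
}
static int example_use_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			  struct notifier_block *nb)
{
	int err;
	nb->notifier_call = example_eq_handler;
	err = mlx5_eq_enable(dev, eq, nb);	/* attach nb to the EQ's IRQ */
	if (err)
		return err;
	/* ... EQEs are now delivered to example_eq_handler() ... */
	mlx5_eq_disable(dev, eq, nb);		/* detach before destroying */
	return 0;
}
#endif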
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
mlx5_debug_eq_remove(dev, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
}
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &eq->cq_table;
int err;
spin_lock(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
spin_unlock(&table->lock);
return err;
}
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
struct mlx5_cq_table *table = &eq->cq_table;
struct mlx5_core_cq *tmp;
spin_lock(&table->lock);
tmp = radix_tree_delete(&table->tree, cq->cqn);
spin_unlock(&table->lock);
	if (!tmp) {
		mlx5_core_dbg(eq->dev, "cq 0x%x not found in eq 0x%x tree\n",
			      cq->cqn, eq->eqn);
		return;
	}
	if (tmp != cq)
		mlx5_core_dbg(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n",
			      cq->cqn, eq->eqn);
}
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table;
int i;
eq_table = kvzalloc_node(sizeof(*eq_table), GFP_KERNEL,
dev->priv.numa_node);
if (!eq_table)
return -ENOMEM;
dev->priv.eq_table = eq_table;
mlx5_eq_debugfs_init(dev);
mutex_init(&eq_table->lock);
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
eq_table->irq_table = mlx5_irq_table_get(dev);
cpumask_clear(&eq_table->used_cpus);
xa_init(&eq_table->comp_eqs);
xa_init(&eq_table->comp_irqs);
mutex_init(&eq_table->comp_lock);
eq_table->curr_comp_eqs = 0;
return 0;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
mlx5_eq_debugfs_cleanup(dev);
xa_destroy(&table->comp_irqs);
xa_destroy(&table->comp_eqs);
kvfree(table);
}
/* Async EQs */
static int create_async_eq(struct mlx5_core_dev *dev,
struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
mutex_lock(&eq_table->lock);
err = create_map_eq(dev, eq, param);
mutex_unlock(&eq_table->lock);
return err;
}
static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
mutex_lock(&eq_table->lock);
err = destroy_unmap_eq(dev, eq);
mutex_unlock(&eq_table->lock);
return err;
}
static int cq_err_event_notifier(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_eq_table *eqt;
struct mlx5_core_cq *cq;
struct mlx5_eqe *eqe;
struct mlx5_eq *eq;
u32 cqn;
/* type == MLX5_EVENT_TYPE_CQ_ERROR */
eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
eq = &eqt->async_eq.core;
eqe = data;
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
cq = mlx5_eq_cq_get(eq, cqn);
if (unlikely(!cq)) {
mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
return NOTIFY_OK;
}
if (cq->event)
cq->event(cq, type);
mlx5_cq_put(cq);
return NOTIFY_OK;
}
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{
__be64 *user_unaffiliated_events;
__be64 *user_affiliated_events;
int i;
user_affiliated_events =
MLX5_CAP_DEV_EVENT(dev, user_affiliated_events);
user_unaffiliated_events =
MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events);
for (i = 0; i < 4; i++)
mask[i] |= be64_to_cpu(user_affiliated_events[i] |
user_unaffiliated_events[i]);
}
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
if (MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
if (MLX5_PPS_CAP(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
if (MLX5_CAP_GEN(dev, fpga))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
(1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
if (MLX5_CAP_GEN_MAX(dev, dct))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
if (MLX5_CAP_GEN(dev, temp_warn_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
if (mlx5_eswitch_is_funcs_handler(dev))
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
if (MLX5_CAP_GEN_MAX(dev, vhca_state))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
gather_user_async_events(dev, mask);
}
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
struct mlx5_eq_param *param, const char *name)
{
int err;
eq->irq_nb.notifier_call = mlx5_eq_async_int;
spin_lock_init(&eq->lock);
err = create_async_eq(dev, &eq->core, param);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
}
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
mlx5_core_warn(dev, "failed to enable %s EQ %d\n", name, err);
destroy_async_eq(dev, &eq->core);
}
return err;
}
static void cleanup_async_eq(struct mlx5_core_dev *dev,
struct mlx5_eq_async *eq, const char *name)
{
int err;
mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
err = destroy_async_eq(dev, &eq->core);
if (err)
mlx5_core_err(dev, "failed to destroy %s eq, err(%d)\n",
name, err);
}
static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
int err;
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
&val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
return MLX5_NUM_ASYNC_EQE;
}
static int create_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_param param = {};
int err;
	/* All async EQs of this device share a single IRQ: request one IRQ and
	 * reuse its index for each of them.
	 */
table->ctrl_irq = mlx5_ctrl_irq_request(dev);
if (IS_ERR(table->ctrl_irq))
return PTR_ERR(table->ctrl_irq);
MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
err = setup_async_eq(dev, &table->cmd_eq, ¶m, "cmd");
if (err)
goto err1;
mlx5_cmd_use_events(dev);
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = async_eq_depth_devlink_param_get(dev),
};
gather_async_events_mask(dev, param.mask);
err = setup_async_eq(dev, &table->async_eq, ¶m, "async");
if (err)
goto err2;
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
err = setup_async_eq(dev, &table->pages_eq, ¶m, "pages");
if (err)
goto err3;
return 0;
err3:
cleanup_async_eq(dev, &table->async_eq, "async");
err2:
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
mlx5_ctrl_irq_release(table->ctrl_irq);
return err;
}
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
mlx5_ctrl_irq_release(table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
return &dev->priv.eq_table->async_eq.core;
}
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{
synchronize_irq(dev->priv.eq_table->async_eq.core.irqn);
}
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{
synchronize_irq(dev->priv.eq_table->cmd_eq.core.irqn);
}
/* Generic EQ API for mlx5_core consumers
* Needed For RDMA ODP EQ for now
*/
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq_param *param)
{
struct mlx5_eq *eq = kvzalloc_node(sizeof(*eq), GFP_KERNEL,
dev->priv.numa_node);
int err;
if (!eq)
return ERR_PTR(-ENOMEM);
param->irq = dev->priv.eq_table->ctrl_irq;
err = create_async_eq(dev, eq, param);
if (err) {
kvfree(eq);
eq = ERR_PTR(err);
}
return eq;
}
EXPORT_SYMBOL(mlx5_eq_create_generic);
int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
if (IS_ERR(eq))
return -EINVAL;
err = destroy_async_eq(dev, eq);
if (err)
goto out;
kvfree(eq);
out:
return err;
}
EXPORT_SYMBOL(mlx5_eq_destroy_generic);
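/* Illustrative sketch, not part of the upstream driver: full lifecycle of a
 * generic EQ as an mlx5_core consumer (e.g. the RDMA ODP EQ) would drive it.
 * The depth and the example_* name are placeholders; a real consumer also
 * fills param.mask[] with the event types it wants.
 */
#if 0	/* illustrative example only */
static struct mlx5_eq *example_create_and_enable(struct mlx5_core_dev *dev,
						 struct notifier_block *nb)
{
	struct mlx5_eq_param param = {
		.nent = 64,	/* placeholder depth */
	};
	struct mlx5_eq *eq;
	int err;
	eq = mlx5_eq_create_generic(dev, &param);	/* irq is filled in internally */
	if (IS_ERR(eq))
		return eq;
	err = mlx5_eq_enable(dev, eq, nb);
	if (err) {
		mlx5_eq_destroy_generic(dev, eq);
		return ERR_PTR(err);
	}
	return eq;
}
#endif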
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{
u32 ci = eq->cons_index + cc;
u32 nent = eq_get_size(eq);
struct mlx5_eqe *eqe;
eqe = get_eqe(eq, ci & (nent - 1));
eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;
/* Make sure we read EQ entry contents after we've
* checked the ownership bit.
*/
if (eqe)
dma_rmb();
return eqe;
}
EXPORT_SYMBOL(mlx5_eq_get_eqe);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
u32 val;
eq->cons_index += cc;
val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
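/* Illustrative sketch, not part of the upstream driver: the polling pattern
 * the two exports above are meant for. mlx5_eq_get_eqe() already issues the
 * dma_rmb() after the ownership check, so the caller only counts consumed
 * entries and publishes them once with mlx5_eq_update_ci(), re-arming the EQ.
 */
#if 0	/* illustrative example only */
static void example_poll_eq(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	u32 cc = 0;
	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		/* ... handle eqe->type / eqe->data ... */
		cc++;
	}
	mlx5_eq_update_ci(eq, cc, true);	/* update CI and arm the EQ */
}
#endif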
static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
irq = xa_load(&table->comp_irqs, vecidx);
if (!irq)
return;
xa_erase(&table->comp_irqs, vecidx);
mlx5_irq_release_vector(irq);
}
static int mlx5_cpumask_default_spread(int numa_node, int index)
{
const struct cpumask *prev = cpu_none_mask;
const struct cpumask *mask;
int found_cpu = 0;
int i = 0;
int cpu;
rcu_read_lock();
for_each_numa_hop_mask(mask, numa_node) {
for_each_cpu_andnot(cpu, mask, prev) {
if (i++ == index) {
found_cpu = cpu;
goto spread_done;
}
}
prev = mask;
}
spread_done:
rcu_read_unlock();
return found_cpu;
}
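/* Worked example (illustrative, not from the upstream source): the spread
 * above walks CPUs by increasing NUMA distance, so index 0 picks the first
 * CPU of the local node and indexes past the local node's CPU count spill
 * over to the next NUMA hop, e.g. with CPUs 0-3 local, index 4 lands on the
 * first CPU of the nearest remote node.
 */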
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
#ifdef CONFIG_MLX5_SF
if (mlx5_core_is_sf(dev))
return dev->priv.parent_mdev->priv.eq_table->rmap;
#endif
return dev->priv.eq_table->rmap;
#else
return NULL;
#endif
}
static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct cpu_rmap *rmap;
struct mlx5_irq *irq;
int cpu;
rmap = mlx5_eq_table_get_pci_rmap(dev);
cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vecidx);
irq = mlx5_irq_request_vector(dev, cpu, vecidx, &rmap);
if (IS_ERR(irq))
return PTR_ERR(irq);
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
irq = xa_load(&table->comp_irqs, vecidx);
if (!irq)
return;
xa_erase(&table->comp_irqs, vecidx);
mlx5_irq_affinity_irq_release(dev, irq);
}
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
if (IS_ERR(irq)) {
		/* If the SF IRQ pool does not exist, fall back to the PF IRQs */
if (PTR_ERR(irq) == -ENOENT)
return comp_irq_request_pci(dev, vecidx);
return PTR_ERR(irq);
}
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}
static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
{
mlx5_core_is_sf(dev) ? comp_irq_release_sf(dev, vecidx) :
comp_irq_release_pci(dev, vecidx);
}
static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
{
return mlx5_core_is_sf(dev) ? comp_irq_request_sf(dev, vecidx) :
comp_irq_request_pci(dev, vecidx);
}
#ifdef CONFIG_RFS_ACCEL
static int alloc_rmap(struct mlx5_core_dev *mdev)
{
struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
/* rmap is a mapping between irq number and queue number.
* Each irq can be assigned only to a single rmap.
* Since SFs share IRQs, rmap mapping cannot function correctly
* for irqs that are shared between different core/netdev RX rings.
* Hence we don't allow netdev rmap for SFs.
*/
if (mlx5_core_is_sf(mdev))
return 0;
eq_table->rmap = alloc_irq_cpu_rmap(eq_table->max_comp_eqs);
if (!eq_table->rmap)
return -ENOMEM;
return 0;
}
static void free_rmap(struct mlx5_core_dev *mdev)
{
struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
if (eq_table->rmap) {
free_irq_cpu_rmap(eq_table->rmap);
eq_table->rmap = NULL;
}
}
#else
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
xa_erase(&table->comp_eqs, vecidx);
mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
if (destroy_unmap_eq(dev, &eq->core))
mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
eq->core.eqn);
tasklet_disable(&eq->tasklet_ctx.task);
kfree(eq);
comp_irq_release(dev, vecidx);
table->curr_comp_eqs--;
}
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
union devlink_param_value val;
int err;
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
&val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
return MLX5_COMP_EQ_SIZE;
}
/* Must be called with EQ table comp_lock held */
static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_param param = {};
struct mlx5_eq_comp *eq;
struct mlx5_irq *irq;
int nent;
int err;
lockdep_assert_held(&table->comp_lock);
if (table->curr_comp_eqs == table->max_comp_eqs) {
mlx5_core_err(dev, "maximum number of vectors is allocated, %d\n",
table->max_comp_eqs);
return -ENOMEM;
}
err = comp_irq_request(dev, vecidx);
if (err)
return err;
nent = comp_eq_depth_devlink_param_get(dev);
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, dev->priv.numa_node);
if (!eq) {
err = -ENOMEM;
goto clean_irq;
}
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
irq = xa_load(&table->comp_irqs, vecidx);
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
.irq = irq,
.nent = nent,
};
err = create_map_eq(dev, &eq->core, ¶m);
if (err)
goto clean_eq;
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
destroy_unmap_eq(dev, &eq->core);
goto clean_eq;
}
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
err = xa_err(xa_store(&table->comp_eqs, vecidx, eq, GFP_KERNEL));
if (err)
goto disable_eq;
table->curr_comp_eqs++;
return eq->core.eqn;
disable_eq:
mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
clean_eq:
kfree(eq);
clean_irq:
comp_irq_release(dev, vecidx);
return err;
}
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
int ret = 0;
mutex_lock(&table->comp_lock);
eq = xa_load(&table->comp_eqs, vecidx);
if (eq) {
*eqn = eq->core.eqn;
goto out;
}
ret = create_comp_eq(dev, vecidx);
if (ret < 0) {
mutex_unlock(&table->comp_lock);
return ret;
}
*eqn = ret;
out:
mutex_unlock(&table->comp_lock);
return 0;
}
EXPORT_SYMBOL(mlx5_comp_eqn_get);
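/* Illustrative sketch, not part of the upstream driver: a ULP that needs a
 * completion EQ number (e.g. to place in a CQ context) only has to ask for a
 * vector index; the EQ is created lazily on first use and cached afterwards.
 * The example_* name is hypothetical.
 */
#if 0	/* illustrative example only */
static int example_pick_eqn(struct mlx5_core_dev *dev, u16 vecidx)
{
	int eqn;
	int err;
	err = mlx5_comp_eqn_get(dev, vecidx, &eqn);	/* creates the EQ if needed */
	if (err)
		return err;
	return eqn;	/* the caller uses eqn when creating its CQ */
}
#endif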
int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
int eqn;
int err;
/* Allocate the EQ if not allocated yet */
err = mlx5_comp_eqn_get(dev, vector, &eqn);
if (err)
return err;
eq = xa_load(&table->comp_eqs, vector);
*irqn = eq->core.irqn;
return 0;
}
unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{
return dev->priv.eq_table->max_comp_eqs;
}
EXPORT_SYMBOL(mlx5_comp_vectors_max);
static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
eq = xa_load(&table->comp_eqs, vector);
if (eq)
return mlx5_irq_get_affinity_mask(eq->core.irq);
return NULL;
}
int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
{
struct cpumask *mask;
int cpu;
mask = mlx5_comp_irq_get_affinity_mask(dev, vector);
if (mask)
cpu = cpumask_first(mask);
else
cpu = mlx5_cpumask_default_spread(dev->priv.numa_node, vector);
return cpu;
}
EXPORT_SYMBOL(mlx5_comp_vector_get_cpu);
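/* Illustrative sketch, not part of the upstream driver: iterating the
 * available completion vectors to learn, per vector, which IRQ fires and
 * which CPU it is expected to run on (useful for queue-to-CPU placement).
 * The example_* name is hypothetical.
 */
#if 0	/* illustrative example only */
static void example_dump_vectors(struct mlx5_core_dev *dev)
{
	int max = mlx5_comp_vectors_max(dev);
	unsigned int irqn;
	int vec;
	for (vec = 0; vec < max; vec++) {
		if (mlx5_comp_irqn_get(dev, vec, &irqn))
			continue;
		mlx5_core_dbg(dev, "vector %d: irq %u cpu %d\n",
			      vec, irqn, mlx5_comp_vector_get_cpu(dev, vec));
	}
}
#endif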
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
return dev->priv.eq_table->rmap;
}
#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
unsigned long index;
xa_for_each(&table->comp_eqs, index, eq)
if (eq->core.eqn == eqn)
return eq;
return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
mlx5_irq_table_free_irqs(dev);
}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS 4
#else
#define MLX5_MAX_ASYNC_EQS 3
#endif
static int get_num_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int max_dev_eqs;
int max_eqs_sf;
int num_eqs;
	/* If ethernet is disabled we use just a single completion vector to
	 * leave the other vectors available for other mlx5_core users, for
	 * example mlx5_vdpa.
	 */
if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
return 1;
max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq);
num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
max_dev_eqs - MLX5_MAX_ASYNC_EQS);
if (mlx5_core_is_sf(dev)) {
max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
num_eqs = min_t(int, num_eqs, max_eqs_sf);
}
return num_eqs;
}
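/* Worked example (illustrative, not from the upstream source): with, say, 16
 * completion IRQs available and a device cap of 256 EQs, get_num_eqs()
 * returns min(16, 256 - MLX5_MAX_ASYNC_EQS) = 16; an SF is further clamped by
 * MLX5_COMP_EQS_PER_SF and by the vectors reserved for SFs.
 */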
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
eq_table->max_comp_eqs = get_num_eqs(dev);
err = create_async_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create async EQs\n");
goto err_async_eqs;
}
err = alloc_rmap(dev);
if (err) {
mlx5_core_err(dev, "Failed to allocate rmap\n");
goto err_rmap;
}
return 0;
err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
}
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_eq_comp *eq;
unsigned long index;
xa_for_each(&table->comp_eqs, index, eq)
destroy_comp_eq(dev, eq, index);
free_rmap(dev);
destroy_async_eqs(dev);
}
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
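/* Illustrative sketch, not part of the upstream driver: how another mlx5
 * component subscribes to one async event type through the notifier chains
 * kept in the EQ table. MLX5_NB_INIT() fills nb->event_type from the event
 * name, as done for CQ_ERROR above; the example_* handler is hypothetical.
 */
#if 0	/* illustrative example only */
static int example_port_module_handler(struct notifier_block *nb,
				       unsigned long type, void *data)
{
	struct mlx5_eqe *eqe = data;
	/* ... inspect eqe->data for the port module event details ... */
	return NOTIFY_OK;
}
static void example_subscribe(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
	MLX5_NB_INIT(nb, example_port_module_handler, PORT_MODULE_EVENT);
	mlx5_eq_notifier_register(dev, nb);
	/* ... later, before teardown ... */
	mlx5_eq_notifier_unregister(dev, nb);
}
#endif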
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/eq.c
|
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "devlink.h"
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_table {
/* Protects the dynamic assignment of the t parameter
* which is the nic tc root table.
*/
struct mutex t_lock;
struct mlx5e_priv *priv;
struct mlx5_flow_table *t;
struct mlx5_flow_table *miss_t;
struct mlx5_fs_chains *chains;
struct mlx5e_post_act *post_act;
struct rhashtable ht;
struct mod_hdr_tbl mod_hdr;
struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
struct mlx5_tc_ct_priv *ct;
struct mapping_ctx *mapping;
struct dentry *dfs_root;
/* tc action stats */
struct mlx5e_tc_act_stats_handle *action_stats_handle;
};
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
.mlen = 16,
},
[VPORT_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 16,
.mlen = 16,
},
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
.moffset = 8,
.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
[ZONE_TO_REG] = zone_to_reg_ct,
[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
[CTSTATE_TO_REG] = ctstate_to_reg_ct,
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
/* For NIC rules we store the restore metadata directly
* into reg_b that is passed to SW since we don't
* jump between steering domains.
*/
[NIC_MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
.mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
struct mlx5e_tc_jump_state {
u32 jump_count;
bool jump_target;
struct mlx5_flow_attr *jumping_attr;
enum flow_action_id last_id;
u32 last_index;
};
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
struct mlx5e_tc_table *tc;
tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
return tc ? tc : ERR_PTR(-ENOMEM);
}
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
kvfree(tc);
}
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
return tc->chains;
}
/* To avoid a false lock dependency warning, give the tc_ht lock a class
 * different from that of the other hashtables in use: when the last flow of a
 * group is deleted and the group itself is then deleted, we end up in
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes an ht->mutex, but it is a different mutex than the tc_ht one
 * here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 val,
u32 mask)
{
void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
u32 max_mask = GENMASK(match_len - 1, 0);
__be32 curr_mask_be, curr_val_be;
u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
memcpy(&curr_mask_be, fmask, 4);
memcpy(&curr_val_be, fval, 4);
curr_mask = be32_to_cpu(curr_mask_be);
curr_val = be32_to_cpu(curr_val_be);
	/* Shift the new value and mask to the field's offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;
	/* Clear the bits covered by this field */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;
	/* Merge the new value and mask into what is already matched */
	curr_mask |= mask;
	curr_val |= val;
	/* Convert back to big endian and write out */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);
	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
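/* Worked example (illustrative, not from the upstream source): for a mapping
 * such as TUNNEL_TO_REG above (moffset 8, mlen of 24 bits if the tunnel opts
 * and id fields are 12 bits each), a call like
 * mlx5e_tc_match_to_reg_match(spec, TUNNEL_TO_REG, id, 0xffffff) shifts both
 * value and mask into bits 31:8 of metadata_reg_c_1 (soffset selects the
 * dword inside fte_match_param) while leaving bits 7:0 of the existing match
 * untouched.
 */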
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 *val,
u32 *mask)
{
void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
u32 max_mask = GENMASK(match_len - 1, 0);
__be32 curr_mask_be, curr_val_be;
u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
memcpy(&curr_mask_be, fmask, 4);
memcpy(&curr_val_be, fval, 4);
curr_mask = be32_to_cpu(curr_mask_be);
curr_val = be32_to_cpu(curr_val_be);
*mask = (curr_mask >> moffset) & max_mask;
*val = (curr_val >> moffset) & max_mask;
}
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
int err;
modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
if (IS_ERR(modact))
return PTR_ERR(modact);
	/* Firmware has a 5-bit length field, where 0 means 32 bits */
if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
MLX5_SET(set_action_in, modact, offset, moffset);
MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
return err;
}
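/* Illustrative sketch, not part of the upstream driver: the id returned above
 * is the index of the SET action inside mod_hdr_acts, so a caller can reserve
 * the action with a placeholder and patch it later with
 * mlx5e_tc_match_to_reg_mod_hdr_change() once the real data is known. The
 * example_* name is hypothetical.
 */
#if 0	/* illustrative example only */
static int example_set_then_patch(struct mlx5_core_dev *mdev,
				  struct mlx5e_tc_mod_hdr_acts *acts,
				  u32 final_data)
{
	int act_id;
	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts,
						      MLX5_FLOW_NAMESPACE_FDB,
						      MAPPED_OBJ_TO_REG, 0);
	if (act_id < 0)
		return act_id;
	/* ... once final_data is resolved ... */
	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, MAPPED_OBJ_TO_REG,
					     act_id, final_data);
	return 0;
}
#endif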
static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->action_stats_handle;
}
return tc->action_stats_handle;
}
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->int_port_priv;
}
return NULL;
}
struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5e_priv *priv;
if (is_mdev_switchdev_mode(dev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
priv = netdev_priv(uplink_rpriv->netdev);
if (!uplink_priv->flow_meters)
uplink_priv->flow_meters =
mlx5e_flow_meters_init(priv,
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
if (!IS_ERR(uplink_priv->flow_meters))
return uplink_priv->flow_meters;
}
return NULL;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->ct_priv;
}
return tc->ct;
}
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->tc_psample;
}
return NULL;
}
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (is_mdev_switchdev_mode(priv->mdev)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->post_act;
}
return tc->post_act;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (is_mdev_switchdev_mode(priv->mdev))
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (is_mdev_switchdev_mode(priv->mdev)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return;
}
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
struct mlx5e_post_act *post_act = get_post_action(priv);
struct mlx5e_post_meter_priv *post_meter;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_flow_meter_handle *meter;
enum mlx5e_post_meter_type type;
meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
if (IS_ERR(meter)) {
mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
return PTR_ERR(meter);
}
ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
type,
meter->act_counter, meter->drop_counter,
attr->branch_true, attr->branch_false);
if (IS_ERR(post_meter)) {
mlx5_core_err(priv->mdev, "Failed to init post meter\n");
goto err_meter_init;
}
attr->meter_attr.meter = meter;
attr->meter_attr.post_meter = post_meter;
attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
err_meter_init:
mlx5e_tc_meter_put(meter);
return PTR_ERR(post_meter);
}
static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
mlx5e_tc_meter_put(attr->meter_attr.meter);
}
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err;
if (!is_mdev_switchdev_mode(priv->mdev))
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);
if (is_flow_meter_action(attr)) {
err = mlx5e_tc_add_flow_meter(priv, attr);
if (err)
return ERR_PTR(err);
}
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (!is_mdev_switchdev_mode(priv->mdev)) {
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
return;
}
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
return;
}
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
if (attr->meter_attr.meter)
mlx5e_tc_del_flow_meter(esw, attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);
return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
enum mlx5e_tc_attr_to_reg type,
int act_id, u32 data)
{
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);
	/* Firmware has a 5-bit length field, where 0 means 32 bits */
if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
MLX5_SET(set_action_in, modact, offset, moffset);
MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
}
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
struct mlx5_core_dev *func_mdev;
struct mlx5e_priv *func_priv;
u32 tdn;
struct mlx5e_tir direct_tir;
int num_channels;
u8 log_num_packets;
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5_ttc_table *ttc;
};
struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
/* protects flows list */
spinlock_t flows_lock;
/* flows sharing the same hairpin */
struct list_head flows;
	/* hpe's that were not fully initialized when the dead peer update
	 * event function traversed them.
	 */
struct list_head dead_peer_wait_list;
u16 peer_vhca_id;
u8 prio;
struct mlx5e_hairpin *hp;
refcount_t refcnt;
struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
if (!flow || !refcount_inc_not_zero(&flow->refcnt))
return ERR_PTR(-EINVAL);
return flow;
}
void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
if (refcount_dec_and_test(&flow->refcnt)) {
mlx5e_tc_del_flow(priv, flow);
kfree_rcu(flow, rcu_head);
}
}
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
}
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
}
int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&tc->mod_hdr;
}
int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
mlx5e_get_flow_namespace(flow),
&attr->parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
WARN_ON(attr->modify_hdr);
attr->modify_hdr = mlx5e_mod_hdr_get(mh);
attr->mh = mh;
return 0;
}
void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
/* flow wasn't fully initialized */
if (!attr->mh)
return;
mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
attr->mh);
attr->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_priv *priv;
netdev = dev_get_by_index(net, ifindex);
if (!netdev)
return ERR_PTR(-ENODEV);
priv = netdev_priv(netdev);
mdev = priv->mdev;
dev_put(netdev);
/* Mirred tc action holds a refcount on the ifindex net_device (see
* net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
* after dev_put(netdev), while we're in the context of adding a tc flow.
*
* The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
* stored in a hairpin object, which exists until all flows, that refer to it, get
* removed.
*
* On the other hand, after a hairpin object has been created, the peer net_device may
* be removed/unbound while there are still some hairpin flows that are using it. This
* case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
* NETDEV_UNREGISTER event of the peer net_device.
*/
return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
struct mlx5e_tir_builder *builder;
int err;
builder = mlx5e_tir_builder_alloc(false);
if (!builder)
return -ENOMEM;
err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
if (err)
goto out;
mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
if (err)
goto create_tir_err;
out:
mlx5e_tir_builder_free(builder);
return err;
create_tir_err:
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
goto out;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
mlx5e_tir_destroy(&hp->direct_tir);
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir *indir;
int err;
indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
if (!indir)
return -ENOMEM;
mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
indir);
kvfree(indir);
return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5e_rss_params_hash rss_hash;
enum mlx5_traffic_types tt, max_tt;
struct mlx5e_tir_builder *builder;
int err = 0;
builder = mlx5e_tir_builder_alloc(false);
if (!builder)
return -ENOMEM;
rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
struct mlx5e_rss_params_traffic_type rss_tt;
rss_tt = mlx5e_rss_get_default_tt_config(tt);
mlx5e_tir_builder_build_rqt(builder, hp->tdn,
mlx5e_rqt_get_rqtn(&hp->indir_rqt),
false);
mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
}
mlx5e_tir_builder_clear(builder);
}
out:
mlx5e_tir_builder_free(builder);
return err;
err_destroy_tirs:
max_tt = tt;
for (tt = 0; tt < max_tt; tt++)
mlx5e_tir_destroy(&hp->indir_tir[tt]);
goto out;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
int tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
tt == MLX5_TT_ANY ?
mlx5e_tir_get_tirn(&hp->direct_tir) :
mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
}
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
struct mlx5_ttc_table *ttc;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
if (err)
return err;
err = mlx5e_hairpin_create_indirect_tirs(hp);
if (err)
goto err_create_indirect_tirs;
mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
if (IS_ERR(hp->ttc)) {
err = PTR_ERR(hp->ttc);
goto err_create_ttc_table;
}
ttc = mlx5e_fs_get_ttc(priv->fs, false);
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels,
mlx5_get_ttc_flow_table(ttc)->id);
return 0;
err_create_ttc_table:
mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
mlx5e_rqt_destroy(&hp->indir_rqt);
return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
mlx5_destroy_ttc_table(hp->ttc);
mlx5e_hairpin_destroy_indirect_tirs(hp);
mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
int peer_ifindex)
{
struct mlx5_core_dev *func_mdev, *peer_mdev;
struct mlx5e_hairpin *hp;
struct mlx5_hairpin *pair;
int err;
hp = kzalloc(sizeof(*hp), GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (IS_ERR(peer_mdev)) {
err = PTR_ERR(peer_mdev);
goto create_pair_err;
}
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
err = PTR_ERR(pair);
goto create_pair_err;
}
hp->pair = pair;
hp->func_mdev = func_mdev;
hp->func_priv = priv;
hp->num_channels = params->num_channels;
hp->log_num_packets = params->log_num_packets;
err = mlx5e_hairpin_create_transport(hp);
if (err)
goto create_transport_err;
if (hp->num_channels > 1) {
err = mlx5e_hairpin_rss_init(hp);
if (err)
goto rss_init_err;
}
return hp;
rss_init_err:
mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
kfree(hp);
return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
if (hp->num_channels > 1)
mlx5e_hairpin_rss_cleanup(hp);
mlx5e_hairpin_destroy_transport(hp);
mlx5_core_hairpin_destroy(hp->pair);
kvfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
return (peer_vhca_id << 16 | prio);
}
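/* Worked example: hash_hairpin_info(0x5, 3) == 0x50003, i.e. peer_vhca_id in
 * the high 16 bits and prio in the low bits.
 */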
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
hash_for_each_possible(tc->hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
return hpe;
}
}
return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
/* no more hairpin flows for us, release the hairpin pair */
if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
mutex_unlock(&tc->hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp);
}
WARN_ON(!list_empty(&hpe->flows));
kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec, u8 *match_prio,
struct netlink_ext_ack *extack)
{
void *headers_c, *headers_v;
u8 prio_val, prio_mask = 0;
bool vlan_present;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
NL_SET_ERR_MSG_MOD(extack,
"only PCP trust state supported for hairpin");
return -EOPNOTSUPP;
}
#endif
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
if (vlan_present) {
prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
}
if (!vlan_present || !prio_mask) {
prio_val = UNKNOWN_MATCH_PRIO;
} else if (prio_mask != 0x7) {
NL_SET_ERR_MSG_MOD(extack,
"masked priority match not supported for hairpin");
return -EOPNOTSUPP;
}
*match_prio = prio_val;
return 0;
}
static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
struct mlx5e_tc_table *tc = data;
struct mlx5e_hairpin_entry *hpe;
u32 cnt = 0;
u32 bkt;
mutex_lock(&tc->hairpin_tbl_lock);
hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
cnt++;
mutex_unlock(&tc->hairpin_tbl_lock);
*val = cnt;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
debugfs_hairpin_num_active_get, NULL, "%llu\n");
static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
struct mlx5e_tc_table *tc = file->private;
struct mlx5e_hairpin_entry *hpe;
u32 bkt;
mutex_lock(&tc->hairpin_tbl_lock);
hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
seq_printf(file,
"Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
hpe->peer_vhca_id, hpe->prio,
refcount_read(&hpe->refcnt), hpe->hp->num_channels,
BIT(hpe->hp->log_num_packets));
mutex_unlock(&tc->hairpin_tbl_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
struct dentry *dfs_root)
{
if (IS_ERR_OR_NULL(dfs_root))
return;
tc->dfs_root = debugfs_create_dir("tc", dfs_root);
debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
&fops_hairpin_num_active);
debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
&debugfs_hairpin_table_dump_fops);
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct devlink *devlink = priv_to_devlink(priv->mdev);
int peer_ifindex = parse_attr->mirred_ifindex[0];
union devlink_param_value val = {};
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
struct mlx5e_hairpin *hp;
u8 match_prio;
u16 peer_id;
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (IS_ERR(peer_mdev)) {
NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
return PTR_ERR(peer_mdev);
}
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
}
peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
extack);
if (err)
return err;
mutex_lock(&tc->hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
mutex_unlock(&tc->hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
err = -EREMOTEIO;
goto out_err;
}
goto attach_flow;
}
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
mutex_unlock(&tc->hairpin_tbl_lock);
return -ENOMEM;
}
spin_lock_init(&hpe->flows_lock);
INIT_LIST_HEAD(&hpe->flows);
INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
hpe->peer_vhca_id = peer_id;
hpe->prio = match_prio;
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
err = devl_param_driverinit_value_get(
devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
if (err) {
err = -ENOMEM;
goto out_err;
}
params.log_num_packets = ilog2(val.vu32);
params.log_data_size =
clamp_t(u32,
params.log_num_packets +
MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.q_counter = priv->q_counter;
err = devl_param_driverinit_value_get(
devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
if (err) {
err = -ENOMEM;
goto out_err;
}
params.num_channels = val.vu32;
hp = mlx5e_hairpin_create(priv, ¶ms, peer_ifindex);
hpe->hp = hp;
complete_all(&hpe->res_ready);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
goto out_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
flow->attr->nic_attr->hairpin_ft =
mlx5_get_ttc_flow_table(hpe->hp->ttc);
} else {
flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
}
flow->hpe = hpe;
spin_lock(&hpe->flows_lock);
list_add(&flow->hairpin, &hpe->flows);
spin_unlock(&hpe->flows_lock);
return 0;
out_err:
mlx5e_hairpin_put(priv, hpe);
return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
/* flow wasn't fully initialized */
if (!flow->hpe)
return;
spin_lock(&flow->hpe->flows_lock);
list_del(&flow->hairpin);
spin_unlock(&flow->hpe->flows_lock);
mlx5e_hairpin_put(priv, flow->hpe);
flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_fs_chains *nic_chains;
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *ft;
int dest_ix = 0;
nic_chains = mlx5e_nic_chains(tc);
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
if (attr->dest_ft) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = attr->dest_ft;
dest_ix++;
} else if (nic_attr->hairpin_ft) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = nic_attr->hairpin_ft;
dest_ix++;
} else if (nic_attr->hairpin_tirn) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
dest_ix++;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (attr->dest_chain) {
dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
}
dest_ix++;
}
if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
mutex_lock(&tc->t_lock);
if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
tc->t =
mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
if (IS_ERR(tc->t)) {
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
rule = ERR_CAST(tc->t);
goto err_ft_get;
}
}
mutex_unlock(&tc->t_lock);
if (attr->chain || attr->prio)
ft = mlx5_chains_get_table(nic_chains,
attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
else
ft = attr->ft;
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_ft_get;
}
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(ft, spec,
&flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_rule;
return rule;
err_rule:
if (attr->chain || attr->prio)
mlx5_chains_put_table(nic_chains,
attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
err_ft_get:
if (attr->dest_chain)
mlx5_chains_put_table(nic_chains,
attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
return ERR_CAST(rule);
}
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
struct mlx5_flow_attr *attr)
{
struct mlx5_fc *counter;
counter = mlx5_fc_create(counter_dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
attr->counter = counter;
return 0;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
int err;
parse_attr = attr->parse_attr;
if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err)
return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
err = alloc_flow_attr_counter(dev, attr);
if (err)
return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
if (err)
return err;
}
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
return PTR_ERR_OR_ZERO(flow->rule[0]);
}
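
/* Delete a NIC-domain offloaded rule and drop the references it holds on the
 * chain flow tables.
 */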
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_fs_chains *nic_chains;
nic_chains = mlx5e_nic_chains(tc);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
if (attr->dest_chain)
mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
}
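
/* Tear down a NIC tc flow: remove the hardware rule, drop the tc root table
 * when the last filter is gone, and release the modify-header context,
 * counter, hairpin and CT resources before freeing the attributes.
 */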
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_attr *attr = flow->attr;
flow_flag_clear(flow, OFFLOADED);
if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
mutex_lock(&tc->t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
tc->t = NULL;
}
mutex_unlock(&tc->t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(priv->mdev, attr->counter);
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
free_flow_post_acts(flow);
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
}
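
/* Install the FDB (eswitch) rules for @flow. Rules flagged
 * MLX5_ATTR_FLAG_SLOW_PATH are added directly through
 * mlx5_eswitch_add_offloaded_rule(); otherwise the rule is offloaded and,
 * when the attribute has a split count, a forwarding rule is added as
 * flow->rule[1].
 */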
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_handle *rule;
if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1]))
goto err_rule1;
}
return rule;
err_rule1:
mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
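
/* Offload @flow to the eswitch slow path. A temporary attribute is copied
 * from the flow attribute with only FWD_DEST set and the SLOW_PATH flag. When
 * the FDB supports forwarding to a table after header modify, the flow's
 * chain mapping is also written to MAPPED_OBJ_TO_REG through an attached
 * modify-header so the chain can be recovered later.
 */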
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5e_mod_hdr_handle *mh = NULL;
struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
bool fwd_and_modify_cap;
u32 chain_mapping = 0;
int err;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr)
return ERR_PTR(-ENOMEM);
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
if (!fwd_and_modify_cap)
goto skip_restore;
err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
if (err)
goto err_get_chain;
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
MAPPED_OBJ_TO_REG, chain_mapping);
if (err)
goto err_reg_set;
mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
if (IS_ERR(mh)) {
err = PTR_ERR(mh);
goto err_attach;
}
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
skip_restore:
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_offload;
}
flow->attr->slow_mh = mh;
flow->chain_mapping = chain_mapping;
flow_flag_set(flow, SLOW);
mlx5e_mod_hdr_dealloc(&mod_acts);
kfree(slow_attr);
return rule;
err_offload:
if (fwd_and_modify_cap)
mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
if (fwd_and_modify_cap)
mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
mlx5e_mod_hdr_dealloc(&mod_acts);
kfree(slow_attr);
return ERR_PTR(err);
}
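
/* Remove @flow from the eswitch slow path, detaching the chain-restore
 * modify-header and releasing the chain mapping if they were set up at
 * offload time.
 */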
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
struct mlx5_flow_attr *slow_attr;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr) {
mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
return;
}
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
if (slow_mh) {
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
if (slow_mh) {
mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
flow->chain_mapping = 0;
flow->attr->slow_mh = NULL;
}
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
* function.
*/
static void unready_flow_add(struct mlx5e_tc_flow *flow,
struct list_head *unready_flows)
{
flow_flag_set(flow, NOT_READY);
list_add_tail(&flow->unready, unready_flows);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
* function.
*/
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
list_del(&flow->unready);
flow_flag_clear(flow, NOT_READY);
}
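
/* Park @flow on the uplink representor's unready list, marking it NOT_READY;
 * the list is manipulated under unready_flows_lock.
 */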
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
mutex_lock(&uplink_priv->unready_flows_lock);
unready_flow_add(flow, &uplink_priv->unready_flows);
mutex_unlock(&uplink_priv->unready_flows_lock);
}
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
mutex_lock(&uplink_priv->unready_flows_lock);
if (flow_flag_test(flow, NOT_READY))
unready_flow_del(flow);
mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
struct mlx5_core_dev *out_mdev, *route_mdev;
struct mlx5e_priv *out_priv, *route_priv;
out_priv = netdev_priv(out_dev);
out_mdev = out_priv->mdev;
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
if (out_mdev->coredev_type != MLX5_COREDEV_PF)
return false;
if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
route_mdev->coredev_type != MLX5_COREDEV_SF)
return false;
return mlx5e_same_hw_devs(out_priv, route_priv);
}
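
/* Resolve the eswitch vport of @route_dev as seen from @out_dev's eswitch,
 * falling back to the LAG peer eswitches (via devcom) when the vhca id is not
 * known locally.
 */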
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
struct mlx5e_priv *out_priv, *route_priv;
struct mlx5_core_dev *route_mdev;
struct mlx5_devcom_comp_dev *pos;
struct mlx5_eswitch *esw;
u16 vhca_id;
int err;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
route_priv = netdev_priv(route_dev);
route_mdev = route_priv->mdev;
vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
if (!err)
return err;
if (!mlx5_lag_is_active(out_priv->mdev))
return err;
rcu_read_lock();
err = -ENODEV;
mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
if (!err)
break;
}
rcu_read_unlock();
return err;
}
static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
if (!(actions &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
return -EOPNOTSUPP;
}
if (!(~actions &
(MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
return -EOPNOTSUPP;
}
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
return -EOPNOTSUPP;
}
return 0;
}
static bool
has_encap_dests(struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int out_index;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
return true;
return false;
}
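
/* Post-parse processing of a flow attribute: validate the action combination,
 * set up encap destinations for eswitch flows, attach modify-header contexts
 * (including for the branch attributes), and allocate a flow counter when
 * counting was requested.
 */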
static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
bool vf_tun;
int err = 0;
err = verify_attr_actions(attr->action, extack);
if (err)
goto err_out;
if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
if (err)
goto err_out;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
goto err_out;
}
if (attr->branch_true &&
attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
if (err)
goto err_out;
}
if (attr->branch_false &&
attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
if (err)
goto err_out;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
if (err)
goto err_out;
}
err_out:
return err;
}
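
/* Offload a tc flow in the FDB (eswitch) domain: validate the chain and
 * priority ranges, set up decap routes and internal-port handling,
 * post-process the attribute, and install either a regular FDB rule or, when
 * the flow is marked SLOW (e.g. no valid encap neighbour yet), a slow-path
 * rule.
 */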
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
u32 max_prio, max_chain;
int err = 0;
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
err = -EOPNOTSUPP;
goto err_out;
}
max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
err = -EOPNOTSUPP;
goto err_out;
}
if (flow_flag_test(flow, TUN_RX)) {
err = mlx5e_attach_decap_route(priv, flow);
if (err)
goto err_out;
if (!attr->chain && esw_attr->int_port &&
attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If the decap route device is an internal port, change the
			 * source vport value in reg_c0 back to the uplink, just in
			 * case the rule performs a goto to chain > 0. If we have a
			 * miss on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet from
			 * the proper chain.
			 */
u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
esw_attr->in_rep->vport);
err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
goto err_out;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
err = mlx5e_attach_decap(priv, flow, extack);
if (err)
goto err_out;
}
if (netif_is_ovs_master(parse_attr->filter_dev)) {
struct mlx5e_tc_int_port *int_port;
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
err = -EOPNOTSUPP;
goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
err = -EOPNOTSUPP;
goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
parse_attr->filter_dev->ifindex,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
if (IS_ERR(int_port)) {
err = PTR_ERR(int_port);
goto err_out;
}
esw_attr->int_port = int_port;
}
err = post_process_attr(flow, attr, extack);
if (err)
goto err_out;
err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
if (err)
goto err_out;
	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have a valid neigh
	 */
if (flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
if (IS_ERR(flow->rule[0])) {
err = PTR_ERR(flow->rule[0]);
goto err_out;
}
flow_flag_set(flow, OFFLOADED);
return 0;
err_out:
flow_flag_set(flow, FAILED);
return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
headers_v,
geneve_tlv_option_0_data);
return !!geneve_tlv_opt_0_data;
}
static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
if (!attr)
return;
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->parse_attr);
kfree(attr);
}
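
/* Tear down an FDB tc flow: unoffload the hardware rules and release the
 * tunnel mapping, decap route, CT match, action stats and attribute
 * resources.
 */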
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
mlx5e_put_flow_tunnel_id(flow);
remove_unready_flow(flow);
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
complete_all(&flow->del_hw_done);
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
free_flow_post_acts(flow);
mlx5_free_flow_attr_actions(flow, attr);
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr;
attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
return attr->counter;
}
/* Iterate over the flows linked via their tmp_list member to the flow_list
 * head and put (release a reference on) each one.
 */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
struct mlx5e_tc_flow *flow, *tmp;
list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
mlx5e_flow_put(priv, flow);
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
int peer_index)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
struct mlx5e_tc_flow *peer_flow;
struct mlx5e_tc_flow *tmp;
if (!flow_flag_test(flow, ESWITCH) ||
!flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer[peer_index]);
mutex_unlock(&esw->offloads.peer_mutex);
list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
continue;
if (refcount_dec_and_test(&peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
list_del(&peer_flow->peer_flows);
kfree(peer_flow);
}
}
if (list_empty(&flow->peer_flows))
flow_flag_clear(flow, DUP);
}
static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (i == mlx5_get_dev_index(flow->priv->mdev))
continue;
mlx5e_tc_del_fdb_peer_flow(flow, i);
}
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (mlx5e_is_eswitch_flow(flow)) {
struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;
if (!mlx5_devcom_for_each_peer_begin(devcom)) {
mlx5e_tc_del_fdb_flow(priv, flow);
return;
}
mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
mlx5e_tc_del_nic_flow(priv, flow);
}
}
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action *flow_action = &rule->action;
const struct flow_action_entry *act;
int i;
if (chain)
return false;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_GOTO:
return true;
case FLOW_ACTION_SAMPLE:
return true;
default:
continue;
}
}
return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
struct flow_dissector_key_enc_opts *opts,
struct netlink_ext_ack *extack,
bool *dont_care)
{
struct geneve_opt *opt;
int off = 0;
*dont_care = true;
while (opts->len > off) {
opt = (struct geneve_opt *)&opts->data[off];
if (!(*dont_care) || opt->opt_class || opt->type ||
memchr_inv(opt->opt_data, 0, opt->length * 4)) {
*dont_care = false;
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
NL_SET_ERR_MSG_MOD(extack,
"Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
}
}
off += sizeof(struct geneve_opt) + opt->length * 4;
}
return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
struct flow_rule *__rule = (rule);\
typeof(dst) __dst = dst;\
\
memcpy(__dst,\
skb_flow_dissector_target(__rule->match.dissector,\
diss_key,\
__rule->match.key),\
sizeof(*__dst));\
})
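
/* Build a tunnel mapping id from the flow's outer tunnel match (and its
 * enc_opts, when those are not a don't-care). On chain > 0 the id is matched
 * on via TUNNEL_TO_REG; on chain 0 it is programmed into TUNNEL_TO_REG with a
 * modify-header action. The resulting value is stored in attr->tunnel_id.
 */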
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
bool enc_opts_is_dont_care = true;
u32 tun_id, enc_opts_id = 0;
struct mlx5_eswitch *esw;
u32 value, mask;
int err;
esw = priv->mdev->priv.eswitch;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
memset(&tunnel_key, 0, sizeof(tunnel_key));
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
&tunnel_key.enc_control);
if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
&tunnel_key.enc_ipv4);
else
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
&tunnel_key.enc_ipv6);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
&tunnel_key.enc_tp);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
&tunnel_key.enc_key_id);
tunnel_key.filter_ifindex = filter_dev->ifindex;
err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
if (err)
return err;
flow_rule_match_enc_opts(rule, &enc_opts_match);
err = enc_opts_is_dont_care_or_full_match(priv,
enc_opts_match.mask,
extack,
&enc_opts_is_dont_care);
if (err)
goto err_enc_opts;
if (!enc_opts_is_dont_care) {
memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
memcpy(&tun_enc_opts.key, enc_opts_match.key,
sizeof(*enc_opts_match.key));
memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
sizeof(*enc_opts_match.mask));
err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
&tun_enc_opts, &enc_opts_id);
if (err)
goto err_enc_opts;
}
value = tun_id << ENC_OPTS_BITS | enc_opts_id;
mask = enc_opts_id ? TUNNEL_ID_MASK :
(TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
if (attr->chain) {
mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
TUNNEL_TO_REG, value, mask);
} else {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
TUNNEL_TO_REG, value);
if (err)
goto err_set;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
flow->attr->tunnel_id = value;
return 0;
err_set:
if (enc_opts_id)
mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
enc_opts_id);
err_enc_opts:
mapping_remove(uplink_priv->tunnel_mapping, tun_id);
return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
if (tun_id)
mapping_remove(uplink_priv->tunnel_mapping, tun_id);
if (enc_opts_id)
mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
enc_opts_id);
}
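
/* Set the L2 ethertype match. When the device can match on ip_version and the
 * filter fully masks n_proto as IPv4 or IPv6, prefer the ip_version field;
 * otherwise fall back to a plain ethertype match.
 */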
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
void *headers_c, void *headers_v)
{
bool ip_version_cap;
ip_version_cap = outer ?
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.outer_ip_version) :
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.inner_ip_version);
if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
(match->key->n_proto == htons(ETH_P_IP) ||
match->key->n_proto == htons(ETH_P_IPV6))) {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(match->mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(match->key->n_proto));
}
}
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
void *headers_v;
u16 ethertype;
u8 ip_version;
if (outer)
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
else
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* If ip_version is not set in the match value, derive it from the ethertype */
if (!ip_version) {
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
ip_version = 4;
else if (ethertype == ETH_P_IPV6)
ip_version = 6;
}
return ip_version;
}
/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
* And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
* +---------+----------------------------------------+
* |Arriving | Arriving Outer Header |
* | Inner +---------+---------+---------+----------+
* | Header | Not-ECT | ECT(0) | ECT(1) | CE |
* +---------+---------+---------+---------+----------+
* | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
* | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
* | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
* | CE | CE | CE | CE | CE |
* +---------+---------+---------+---------+----------+
*
* Tc matches on inner after decapsulation on tunnel device, but hw offload matches
* the inner ip_ecn value before hardware decap action.
*
 * Cells marked with '*' are changed from the original inner packet ip_ecn value
 * during decap, so matching those values on inner ip_ecn before decap will fail.
 *
 * The following helper allows offload when inner ip_ecn won't be changed by outer
 * ip_ecn, except for outer ip_ecn = CE, where in all cases inner ip_ecn will be
 * changed to CE, and as such we can drop the inner ip_ecn = CE match.
*/
static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
bool *match_inner_ecn)
{
u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct flow_match_ip match;
*match_inner_ecn = true;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
flow_rule_match_enc_ip(rule, &match);
outer_ecn_key = match.key->tos & INET_ECN_MASK;
outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
flow_rule_match_ip(rule, &match);
inner_ecn_key = match.key->tos & INET_ECN_MASK;
inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
}
if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
return -EOPNOTSUPP;
}
if (!outer_ecn_mask) {
if (!inner_ecn_mask)
return 0;
NL_SET_ERR_MSG_MOD(extack,
"Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
netdev_warn(priv->netdev,
"Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
return -EOPNOTSUPP;
}
if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
NL_SET_ERR_MSG_MOD(extack,
"Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
netdev_warn(priv->netdev,
"Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
return -EOPNOTSUPP;
}
if (!inner_ecn_mask)
return 0;
/* Both inner and outer have full mask on ecn */
if (outer_ecn_key == INET_ECN_ECT_1) {
/* inner ecn might change by DECAP action */
NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
return -EOPNOTSUPP;
}
if (outer_ecn_key != INET_ECN_CE)
return 0;
if (inner_ecn_key != INET_ECN_CE) {
/* Can't happen in software, as packet ecn will be changed to CE after decap */
NL_SET_ERR_MSG_MOD(extack,
"Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
netdev_warn(priv->netdev,
"Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
return -EOPNOTSUPP;
}
	/* outer ecn = CE, inner ecn = CE; as decap will change inner ecn to CE in any case,
	 * drop the match on inner ecn
	 */
*match_inner_ecn = false;
return 0;
}
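
/* Parse the tunnel match of an eswitch flow. On chain 0 the outer headers are
 * parsed directly into @spec and a DECAP action is implied (except for
 * bareudp, which decapsulates via packet reformat); on higher chains the
 * outer match is parsed into a temporary spec only to populate the rx_tun
 * attribute. When register mapping is needed or set up by a goto/sample
 * action, a tunnel mapping id is allocated via mlx5e_get_flow_tunnel_id().
 */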
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
u8 *match_level,
bool *match_inner)
{
struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
bool needs_mapping, sets_mapping;
int err;
if (!mlx5e_is_eswitch_flow(flow)) {
NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
return -EOPNOTSUPP;
}
needs_mapping = !!flow->attr->chain;
sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
*match_inner = !needs_mapping;
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
NL_SET_ERR_MSG_MOD(extack,
"Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
}
if (!flow->attr->chain) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to parse tunnel attributes");
netdev_warn(priv->netdev,
"Failed to parse tunnel attributes");
return err;
}
		/* With MPLS over UDP we decapsulate using a packet reformat
		 * object
		 */
if (!netif_is_bareudp(filter_dev))
flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
err = mlx5e_tc_set_attr_rx_tun(flow, spec);
if (err)
return err;
} else if (tunnel) {
struct mlx5_flow_spec *tmp_spec;
tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
if (!tmp_spec) {
NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
return -ENOMEM;
}
memcpy(tmp_spec, spec, sizeof(*tmp_spec));
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
return err;
}
err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
kvfree(tmp_spec);
if (err)
return err;
}
if (!needs_mapping && !sets_mapping)
return 0;
return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
inner_headers);
}
static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_value,
inner_headers);
}
static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
}
static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
}
void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct net_device *ingress_dev;
struct flow_match_meta match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
return 0;
flow_rule_match_meta(rule, &match);
if (match.mask->l2_miss) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
return -EOPNOTSUPP;
}
if (!match.mask->ingress_ifindex)
return 0;
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
return -ENOENT;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
return -EOPNOTSUPP;
}
return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
struct flow_cls_offload *f)
{
/* When doing mpls over udp decap, the user needs to provide
* MPLS_UC as the protocol in order to be able to match on mpls
* label fields. However, the actual ethertype is IP so we want to
* avoid matching on this, otherwise we'll fail the match.
*/
if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
return true;
return false;
}
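
/* Core flower parser: translate the tc dissector keys (L2-L4, VLAN/CVLAN,
 * tunnel, IP and ICMP matches) into the mlx5 match criteria/value layout,
 * tracking the deepest matched header layer in the inner/outer match levels.
 */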
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
u8 *inner_match_level, u8 *outer_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_3);
void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
enum fs_flow_table_type fs_type;
bool match_inner_ecn = true;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
match_level = outer_match_level;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
if (mlx5e_get_tc_tun(filter_dev)) {
bool match_inner = false;
err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
outer_match_level, &match_inner);
if (err)
return err;
if (match_inner) {
/* header pointers should point to the inner headers
* if the packet was decapsulated already.
* outer headers are set by parse_tunnel_attr.
*/
match_level = inner_match_level;
headers_c = get_match_inner_headers_criteria(spec);
headers_v = get_match_inner_headers_value(spec);
}
err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
if (err)
return err;
}
err = mlx5e_flower_parse_meta(filter_dev, f);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
!skip_key_basic(filter_dev, f)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
mlx5e_tc_set_ethertype(priv->mdev, &match,
match_level == outer_match_level,
headers_c, headers_v);
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
is_vlan_dev(filter_dev)) {
struct flow_dissector_key_vlan filter_dev_mask;
struct flow_dissector_key_vlan filter_dev_key;
struct flow_match_vlan match;
if (is_vlan_dev(filter_dev)) {
match.key = &filter_dev_key;
match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
match.key->vlan_priority = 0;
match.mask = &filter_dev_mask;
memset(match.mask, 0xff, sizeof(*match.mask));
match.mask->vlan_priority = 0;
} else {
flow_rule_match_vlan(rule, &match);
}
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
cvlan_tag, 1);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
match.mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
match.key->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
match.mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
match.mask->vlan_eth_type &&
MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
ft_field_support.outer_second_vid,
fs_type)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_cvlan_tag, 1);
spec->match_criteria_enable |=
MLX5_MATCH_MISC_PARAMETERS;
}
}
} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in the match criteria and
		 * disabled in the match value means neither an S-tag
		 * nor a C-tag is present (i.e. the packet is untagged)
		 */
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
struct flow_match_vlan match;
flow_rule_match_cvlan(rule, &match);
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
fs_type)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on CVLAN is not supported");
return -EOPNOTSUPP;
}
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_cvlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_cvlan_tag, 1);
}
MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
match.mask->vlan_id);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
match.key->vlan_id);
MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
match.mask->vlan_priority);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
match.mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
match.key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
match.mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
match.key->src);
if (!is_zero_ether_addr(match.mask->src) ||
!is_zero_ether_addr(match.mask->dst))
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
return -EOPNOTSUPP;
}
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
match.key->flags & FLOW_DIS_IS_FRAGMENT);
/* the HW doesn't need L3 inline to match on frag=no */
if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
*match_level = MLX5_MATCH_L3;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
ip_proto = match.key->ip_proto;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
match.key->ip_proto);
if (match.mask->ip_proto)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&match.key->dst, sizeof(match.key->dst));
if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&match.key->dst, sizeof(match.key->dst));
if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
*match_level = MLX5_MATCH_L3;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
if (match_inner_ecn) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
match.mask->tos & 0x3);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
match.key->tos & 0x3);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
match.mask->tos >> 2);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
match.key->tos >> 2);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
match.mask->ttl);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
match.key->ttl);
if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on TTL is not supported");
return -EOPNOTSUPP;
}
if (match.mask->tos || match.mask->ttl)
*match_level = MLX5_MATCH_L3;
}
/* *** L3 attributes parsing up to here *** */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
switch (ip_proto) {
case IPPROTO_TCP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
tcp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
tcp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
tcp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
tcp_dport, ntohs(match.key->dst));
break;
case IPPROTO_UDP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_dport, ntohs(match.key->dst));
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
return -EINVAL;
}
if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L4;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
struct flow_match_tcp match;
flow_rule_match_tcp(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
ntohs(match.mask->flags));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
ntohs(match.key->flags));
if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
struct flow_match_icmp match;
flow_rule_match_icmp(rule, &match);
switch (ip_proto) {
case IPPROTO_ICMP:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_ICMP)) {
NL_SET_ERR_MSG_MOD(extack,
"Match on Flex protocols for ICMP is not supported");
return -EOPNOTSUPP;
}
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
match.key->type);
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
match.mask->code);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
match.key->code);
break;
case IPPROTO_ICMPV6:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
MLX5_FLEX_PROTO_ICMPV6)) {
NL_SET_ERR_MSG_MOD(extack,
"Match on Flex protocols for ICMPV6 is not supported");
return -EOPNOTSUPP;
}
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
match.key->type);
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
match.mask->code);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
match.key->code);
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Code and type matching only with ICMP and ICMPv6");
netdev_err(priv->netdev,
"Code and type matching only with ICMP and ICMPv6\n");
return -EINVAL;
}
if (match.mask->code || match.mask->type) {
*match_level = MLX5_MATCH_L4;
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
}
}
/* Currently supported only for MPLS over UDP */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
!netif_is_bareudp(filter_dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on MPLS is supported only for MPLS over UDP");
netdev_err(priv->netdev,
"Matching on MPLS is supported only for MPLS over UDP\n");
return -EOPNOTSUPP;
}
return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
u8 inner_match_level, outer_match_level, non_tunnel_match_level;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
bool is_eswitch_flow;
int err;
inner_match_level = MLX5_MATCH_NONE;
outer_match_level = MLX5_MATCH_NONE;
err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
&inner_match_level, &outer_match_level);
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
if (!err && is_eswitch_flow) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < non_tunnel_match_level)) {
NL_SET_ERR_MSG_MOD(extack,
"Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
non_tunnel_match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
flow->attr->inner_match_level = inner_match_level;
flow->attr->outer_match_level = outer_match_level;
return err;
}
struct mlx5_fields {
u8 field;
u8 field_bsize;
u32 field_mask;
u32 offset;
u32 match_offset;
};
#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
offsetof(struct pedit_headers, field) + (off), \
MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
/* Returns true when the masked values are the same and there are no rewrites
 * of bits that are not also covered by the match mask.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
type matchmaskx = *(type *)(matchmaskp); \
type matchvalx = *(type *)(matchvalp); \
type maskx = *(type *)(maskp); \
type valx = *(type *)(valp); \
\
(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
matchmaskx)); \
})
static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
void *matchmaskp, u8 bsize)
{
bool same = false;
switch (bsize) {
case 8:
same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
break;
case 16:
same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
break;
case 32:
same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
break;
}
return same;
}
static struct mlx5_fields fields[] = {
OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
	/* in the Linux tcphdr the TCP flags field is 8 bits long */
OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
static unsigned long mask_to_le(unsigned long mask, int size)
{
__be32 mask_be32;
__be16 mask_be16;
if (size == 32) {
mask_be32 = (__force __be32)(mask);
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (size == 16) {
mask_be32 = (__force __be32)(mask);
mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
}
return mask;
}
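
/* Convert the accumulated pedit set/add masks into mlx5 modify-header
 * actions. Fields already matched to the same value, and add-zero operations,
 * are skipped; rewrites of non-contiguous sub-fields are rejected.
 */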
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
unsigned long mask, field_mask;
int i, first, last, next_z;
struct mlx5_fields *f;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
set_masks = &hdrs[0].masks;
add_masks = &hdrs[1].masks;
set_vals = &hdrs[0].vals;
add_vals = &hdrs[1].vals;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
bool skip;
f = &fields[i];
/* avoid seeing bits set from previous iterations */
s_mask = 0;
a_mask = 0;
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
s_mask = *s_masks_p & f->field_mask;
a_mask = *a_masks_p & f->field_mask;
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
if (s_mask && a_mask) {
NL_SET_ERR_MSG_MOD(extack,
"can't set and add to the same HW field");
netdev_warn(priv->netdev,
"mlx5: can't set and add to the same HW field (%x)\n",
f->field);
return -EOPNOTSUPP;
}
skip = false;
if (s_mask) {
void *match_mask = headers_c + f->match_offset;
void *match_val = headers_v + f->match_offset;
cmd = MLX5_ACTION_TYPE_SET;
mask = s_mask;
vals_p = (void *)set_vals + f->offset;
/* don't rewrite if we have a match on the same value */
if (cmp_val_mask(vals_p, s_masks_p, match_val,
match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
*s_masks_p &= ~f->field_mask;
} else {
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
if ((*(u32 *)vals_p & f->field_mask) == 0)
skip = true;
/* clear to denote we consumed this field */
*a_masks_p &= ~f->field_mask;
}
if (skip)
continue;
mask = mask_to_le(mask, f->field_bsize);
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
last = find_last_bit(&mask, f->field_bsize);
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of few sub-fields isn't supported");
netdev_warn(priv->netdev,
"mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
mask);
return -EOPNOTSUPP;
}
action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
if (IS_ERR(action)) {
NL_SET_ERR_MSG_MOD(extack,
"too many pedit actions, can't offload");
mlx5_core_warn(priv->mdev,
"mlx5: parsed %d pedit actions, can't do more\n",
mod_acts->num_actions);
return PTR_ERR(action);
}
MLX5_SET(set_action_in, action, action_type, cmd);
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
int start;
field_mask = mask_to_le(f->field_mask, f->field_bsize);
			/* if the field is bit-sized it may not start at the first bit */
start = find_first_bit(&field_mask, f->field_bsize);
MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
MLX5_SET(set_action_in, action, length, (last - first + 1));
}
if (f->field_bsize == 32)
MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
else if (f->field_bsize == 16)
MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
else if (f->field_bsize == 8)
MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
++mod_acts->num_actions;
}
return 0;
}
static const struct pedit_headers zero_masks = {};
static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
u8 cmd;
for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
cmd_masks = &parse_attr->hdrs[cmd].masks;
if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
16, 1, cmd_masks, sizeof(zero_masks), true);
return -EOPNOTSUPP;
}
}
return 0;
}
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
u32 *action_flags,
struct netlink_ext_ack *extack)
{
int err;
err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
if (err)
goto out_dealloc_parsed_actions;
err = verify_offload_pedit_fields(priv, parse_attr, extack);
if (err)
goto out_dealloc_parsed_actions;
return 0;
out_dealloc_parsed_actions:
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
return err;
}
struct ip_ttl_word {
__u8 ttl;
__u8 protocol;
__sum16 check;
};
struct ipv6_hoplimit_word {
__be16 payload_len;
__u8 nexthdr;
__u8 hop_limit;
};
static bool
is_flow_action_modify_ip_header(struct flow_action *flow_action)
{
const struct flow_action_entry *act;
u32 mask, offset;
u8 htype;
int i;
	/* For the IPv4 and IPv6 headers, check the 4-byte word containing
	 * ttl/hop_limit to determine whether any field other than
	 * ttl/hop_limit is being modified.
	 */
flow_action_for_each(i, act, flow_action) {
if (act->id != FLOW_ACTION_MANGLE &&
act->id != FLOW_ACTION_ADD)
continue;
htype = act->mangle.htype;
offset = act->mangle.offset;
mask = ~act->mangle.mask;
if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
struct ip_ttl_word *ttl_word =
(struct ip_ttl_word *)&mask;
if (offset != offsetof(struct iphdr, ttl) ||
ttl_word->protocol ||
ttl_word->check)
return true;
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
(struct ipv6_hoplimit_word *)&mask;
if (offset != offsetof(struct ipv6hdr, payload_len) ||
hoplimit_word->payload_len ||
hoplimit_word->nexthdr)
return true;
}
}
return false;
}
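
/* A header rewrite can be offloaded together with this match unless it
 * modifies IP header fields other than ttl/hop_limit while the matched IP
 * protocol is not TCP, UDP or ICMP. Non-IP matches only get MAC rewrites, so
 * they are always fine.
 */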
static bool modify_header_match_supported(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
struct netlink_ext_ack *extack)
{
bool modify_ip_header;
void *headers_c;
void *headers_v;
u16 ethertype;
u8 ip_proto;
headers_c = mlx5e_get_match_headers_criteria(actions, spec);
headers_v = mlx5e_get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
goto out_ok;
modify_ip_header = is_flow_action_modify_ip_header(flow_action);
ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
if (modify_ip_header && ip_proto != IPPROTO_TCP &&
ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of non TCP/UDP");
netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
ip_proto);
return false;
}
out_ok:
return true;
}
static bool
actions_match_supported_fdb(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev,
"current firmware doesn't support split rule for port mirroring\n");
return false;
}
return true;
}
static bool
actions_match_supported(struct mlx5e_priv *priv,
struct flow_action *flow_action,
u32 actions,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
extack))
return false;
if (mlx5e_is_eswitch_flow(flow) &&
!actions_match_supported_fdb(priv, flow, extack))
return false;
return true;
}
static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
return priv->mdev == peer_priv->mdev;
}
bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *fmdev, *pmdev;
u64 fsystem_guid, psystem_guid;
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
return (fsystem_guid == psystem_guid);
}
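/* Build the modify-header context for the parsed pedit set/add commands.
 * If no modify-header actions end up being generated, drop the MOD_HDR
 * flag and free the context; for FDB rules without VLAN push/pop, also
 * reset split_count.
 */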
static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct pedit_headers_action *hdrs = parse_attr->hdrs;
enum mlx5_flow_namespace_type ns_type;
int err;
if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
!hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
return 0;
ns_type = mlx5e_get_flow_namespace(flow);
err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
if (err)
return err;
if (parse_attr->mod_hdr_acts.num_actions > 0)
return 0;
/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
return 0;
if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
attr->esw_attr->split_count = 0;
return 0;
}
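/* Duplicate a flow attribute for a post-action table rule: copy the base
 * attr, then reset the per-rule state (action flags, counter, dest
 * chain/ft, eswitch dest counts, branch pointers) that must be rebuilt
 * for the new rule.
 */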
static struct mlx5_flow_attr*
mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 attr_sz = ns_to_attr_sz(ns_type);
struct mlx5_flow_attr *attr2;
attr2 = mlx5_alloc_flow_attr(ns_type);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!attr2 || !parse_attr) {
kvfree(parse_attr);
kfree(attr2);
return NULL;
}
memcpy(attr2, attr, attr_sz);
INIT_LIST_HEAD(&attr2->list);
parse_attr->filter_dev = attr->parse_attr->filter_dev;
attr2->action = 0;
attr2->counter = NULL;
attr2->tc_act_cookies_count = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
attr2->act_id_restore_rule = NULL;
memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
attr2->esw_attr->split_count = 0;
}
attr2->branch_true = NULL;
attr2->branch_false = NULL;
attr2->jumping_attr = NULL;
return attr2;
}
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_attr *attr;
int i;
list_for_each_entry(attr, &flow->attrs, list) {
esw_attr = attr->esw_attr;
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
return attr;
}
}
return NULL;
}
void
mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr;
list_for_each_entry(attr, &flow->attrs, list) {
if (list_is_last(&attr->list, &flow->attrs))
break;
mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
}
}
static void
free_flow_post_acts(struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr, *tmp;
list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
if (list_is_last(&attr->list, &flow->attrs))
break;
mlx5_free_flow_attr_actions(flow, attr);
list_del(&attr->list);
kvfree(attr->parse_attr);
kfree(attr);
}
}
int
mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr;
int err = 0;
list_for_each_entry(attr, &flow->attrs, list) {
if (list_is_last(&attr->list, &flow->attrs))
break;
err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
if (err)
break;
}
return err;
}
/* TC filter rule HW translation:
*
* +---------------------+
* + ft prio (tc chain) +
* + original match +
* +---------------------+
* |
* | if multi table action
* |
* v
* +---------------------+
* + post act ft |<----.
* + match fte id | | split on multi table action
* + do actions |-----'
* +---------------------+
* |
* |
* v
* Do rest of the actions after last multi table action.
*/
static int
alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
{
struct mlx5e_post_act *post_act = get_post_action(flow->priv);
struct mlx5_flow_attr *attr, *next_attr = NULL;
struct mlx5e_post_act_handle *handle;
int err;
/* The attrs list is walked in reverse parse order:
 * the first entry is the last attribute.
 */
list_for_each_entry(attr, &flow->attrs, list) {
if (!next_attr) {
/* Set counter action on last post act rule. */
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
}
if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
if (err)
goto out_free;
}
/* Don't add post_act rule for first attr (last in the list).
* It's being handled by the caller.
*/
if (list_is_last(&attr->list, &flow->attrs))
break;
err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
if (err)
goto out_free;
err = post_process_attr(flow, attr, extack);
if (err)
goto out_free;
handle = mlx5e_tc_post_act_add(post_act, attr);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto out_free;
}
attr->post_act_handle = handle;
if (attr->jumping_attr) {
err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
if (err)
goto out_free;
}
next_attr = attr;
}
if (flow_flag_test(flow, SLOW))
goto out;
err = mlx5e_tc_offload_flow_post_acts(flow);
if (err)
goto out_free;
out:
return 0;
out_free:
free_flow_post_acts(flow);
return err;
}
static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
struct mlx5e_tc_act_branch_ctrl *cond,
struct mlx5_flow_attr **cond_attr,
u32 *jump_count,
struct netlink_ext_ack *extack)
{
struct mlx5_flow_attr *attr;
int err = 0;
*cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
mlx5e_get_flow_namespace(flow));
if (!(*cond_attr))
return -ENOMEM;
attr = *cond_attr;
switch (cond->act_id) {
case FLOW_ACTION_DROP:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
break;
case FLOW_ACTION_ACCEPT:
case FLOW_ACTION_PIPE:
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
break;
case FLOW_ACTION_JUMP:
if (*jump_count) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
err = -EOPNOTSUPP;
goto out_err;
}
*jump_count = cond->extval;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
break;
default:
err = -EOPNOTSUPP;
goto out_err;
}
return err;
out_err:
kfree(*cond_attr);
*cond_attr = NULL;
return err;
}
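/* Track how many tc actions remain inside a police jump branch. When the
 * count reaches one we are at the branch's last action, so a new attribute
 * is started after it; when it reaches zero the current attribute is the
 * one the branch jumps to and the stored jumping_attr is linked to it.
 */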
static void
dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
struct mlx5e_tc_jump_state *jump_state)
{
if (!jump_state->jump_count)
return;
/* A single tc action can instantiate multiple offload actions (e.g. pedit),
 * so count the jump only once per tc action.
 */
if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
return;
jump_state->last_id = act->id;
jump_state->last_index = act->hw_index;
/* nothing to do for intermediate actions */
if (--jump_state->jump_count > 1)
return;
if (jump_state->jump_count == 1) { /* last action in the jump action list */
/* create a new attribute after this action */
jump_state->jump_target = true;
if (tc_act->is_terminating_action) { /* the branch ends here */
attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else { /* the branch continues executing the rest of the actions */
struct mlx5e_post_act *post_act;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
post_act = get_post_action(priv);
attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
}
} else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
/* This is the post action for the jumping attribute (either red or green)
* Use the stored jumping_attr to set the post act id on the jumping attribute
*/
attr->jumping_attr = jump_state->jumping_attr;
}
}
static int
parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
struct mlx5e_tc_jump_state *jump_state,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
u32 jump_count = jump_state->jump_count;
int err;
if (!tc_act->get_branch_ctrl)
return 0;
tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
err = alloc_branch_attr(flow, &cond_true,
&attr->branch_true, &jump_count, extack);
if (err)
goto out_err;
if (jump_count)
jump_state->jumping_attr = attr->branch_true;
err = alloc_branch_attr(flow, &cond_false,
&attr->branch_false, &jump_count, extack);
if (err)
goto err_branch_false;
if (jump_count && !jump_state->jumping_attr)
jump_state->jumping_attr = attr->branch_false;
jump_state->jump_count = jump_count;
/* branching action requires its own counter */
attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_flag_set(flow, USE_ACT_STATS);
return 0;
err_branch_false:
free_branch_attr(flow, attr->branch_true);
out_err:
return err;
}
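/* Walk the tc actions, letting each mlx5e_tc_act handler validate and parse
 * its action into the current attribute. Whenever a multi-table action (or
 * the end of a jump branch) is hit before the last action, the current
 * attribute is closed and a new one is started; the resulting chain of
 * attributes is later offloaded through the post-action table.
 */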
static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action)
{
struct netlink_ext_ack *extack = parse_state->extack;
struct mlx5e_tc_flow *flow = parse_state->flow;
struct mlx5e_tc_jump_state jump_state = {};
struct mlx5_flow_attr *attr = flow->attr;
enum mlx5_flow_namespace_type ns_type;
struct mlx5e_priv *priv = flow->priv;
struct mlx5_flow_attr *prev_attr;
struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
int err, i, i_split = 0;
bool is_missable;
ns_type = mlx5e_get_flow_namespace(flow);
list_add(&attr->list, &flow->attrs);
flow_action_for_each(i, act, flow_action) {
jump_state.jump_target = false;
is_missable = false;
prev_attr = attr;
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act) {
NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
err = -EOPNOTSUPP;
goto out_free_post_acts;
}
if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
err = -EOPNOTSUPP;
goto out_free_post_acts;
}
err = tc_act->parse_action(parse_state, act, priv, attr);
if (err)
goto out_free_post_acts;
dec_jump_count(act, tc_act, attr, priv, &jump_state);
err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
if (err)
goto out_free_post_acts;
parse_state->actions |= attr->action;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
(tc_act->is_multi_table_act &&
tc_act->is_multi_table_act(priv, act, attr) &&
i < flow_action->num_entries - 1)) {
is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
ns_type);
if (err)
goto out_free_post_acts;
attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
if (!attr) {
err = -ENOMEM;
goto out_free_post_acts;
}
i_split = i + 1;
parse_state->if_count = 0;
list_add(&attr->list, &flow->attrs);
}
if (is_missable) {
/* Add counter to prev, and assign act to new (next) attr */
prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_flag_set(flow, USE_ACT_STATS);
attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
} else if (!tc_act->stats_action) {
prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
}
}
err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
if (err)
goto out_free_post_acts;
err = alloc_flow_post_acts(flow, extack);
if (err)
goto out_free_post_acts;
return 0;
out_free_post_acts:
free_flow_post_acts(flow);
return err;
}
static int
flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
}
if (!flow_action_hw_stats_check(flow_action, extack,
FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
}
return 0;
}
static int
parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
int err;
err = flow_action_supported(flow_action, extack);
if (err)
return err;
attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
parse_attr = attr->parse_attr;
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
err = verify_attr_actions(attr->action, extack);
if (err)
return err;
if (!actions_match_supported(priv, flow_action, parse_state->actions,
parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
}
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
struct mlx5e_priv *peer_priv;
peer_priv = netdev_priv(peer_netdev);
return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
mlx5e_eswitch_vf_rep(priv->netdev) &&
mlx5e_eswitch_vf_rep(peer_netdev) &&
mlx5e_same_hw_devs(priv, peer_priv));
}
static bool same_hw_reps(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
struct mlx5e_priv *peer_priv;
peer_priv = netdev_priv(peer_netdev);
return mlx5e_eswitch_rep(priv->netdev) &&
mlx5e_eswitch_rep(peer_netdev) &&
mlx5e_same_hw_devs(priv, peer_priv);
}
static bool is_lag_dev(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
return ((mlx5_lag_is_sriov(priv->mdev) ||
mlx5_lag_is_multipath(priv->mdev)) &&
same_hw_reps(priv, peer_netdev));
}
static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
}
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
struct net_device *out_dev)
{
if (is_merged_eswitch_vfs(priv, out_dev))
return true;
if (is_multiport_eligible(priv, out_dev))
return true;
if (is_lag_dev(priv, out_dev))
return true;
return mlx5e_eswitch_rep(out_dev) &&
same_port_devs(priv, netdev_priv(out_dev));
}
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
int ifindex,
enum mlx5e_tc_int_port_type type,
u32 *action,
int out_index)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_tc_int_port_priv *int_port_priv;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_int_port *dest_int_port;
int err;
parse_attr = attr->parse_attr;
int_port_priv = mlx5e_get_int_port_priv(priv);
dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
if (IS_ERR(dest_int_port))
return PTR_ERR(dest_int_port);
err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
mlx5e_tc_int_port_get_metadata(dest_int_port));
if (err) {
mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
return err;
}
*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
esw_attr->dest_int_port = dest_int_port;
esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
esw_attr->split_count = out_index;
/* Forward to root fdb for matching against the new source vport */
attr->dest_chain = 0;
return 0;
}
static int
parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5e_tc_act_parse_state *parse_state;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct net_device *filter_dev;
int err;
err = flow_action_supported(flow_action, extack);
if (err)
return err;
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
filter_dev = parse_attr->filter_dev;
parse_state = &parse_attr->parse_state;
mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
parse_state->ct_priv = get_ct_priv(priv);
err = parse_tc_actions(parse_state, flow_action);
if (err)
return err;
/* Forwarding to/from an internal port can only have one destination */
if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
esw_attr->out_count > 1) {
NL_SET_ERR_MSG_MOD(extack,
"Rules with internal port can have only one destination");
return -EOPNOTSUPP;
}
/* Forwarding from a tunnel/internal port to an internal port is not supported */
if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
esw_attr->dest_int_port) {
NL_SET_ERR_MSG_MOD(extack,
"Forwarding from tunnel/internal port to internal port is not supported");
return -EOPNOTSUPP;
}
err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
if (err)
return err;
if (!actions_match_supported(priv, flow_action, parse_state->actions,
parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
}
static void get_flags(int flags, unsigned long *flow_flags)
{
unsigned long __flow_flags = 0;
if (flags & MLX5_TC_FLAG(INGRESS))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
if (flags & MLX5_TC_FLAG(EGRESS))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
rpriv = priv->ppriv;
return &rpriv->tc_ht;
} else /* NIC offload */
return &tc->ht;
}
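/* A peer (mirror) FDB rule is needed when the eswitches are paired and
 * either the device is in sriov/multipath LAG and the rule ingresses on a
 * non-uplink rep or performs encap, or multiport eswitch mode is active.
 */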
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
if (!esw_paired)
return false;
if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
(is_rep_ingress || act_is_encap))
return true;
if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
return true;
return false;
}
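/* Allocate a flow attribute with the namespace-specific extension
 * (eswitch or NIC attr) placed inline right after the base structure.
 */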
struct mlx5_flow_attr *
mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
{
u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
sizeof(struct mlx5_esw_flow_attr) :
sizeof(struct mlx5_nic_flow_attr);
struct mlx5_flow_attr *attr;
attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
if (!attr)
return attr;
INIT_LIST_HEAD(&attr->list);
return attr;
}
static void
mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
struct mlx5_esw_flow_attr *esw_attr;
if (!attr)
return;
if (attr->post_act_handle)
mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(counter_dev, attr->counter);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
if (mlx5e_is_eswitch_flow(flow)) {
esw_attr = attr->esw_attr;
if (esw_attr->int_port)
mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
esw_attr->int_port);
if (esw_attr->dest_int_port)
mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
esw_attr->dest_int_port);
}
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
struct mlx5e_tc_flow_parse_attr **__parse_attr,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr;
struct mlx5e_tc_flow *flow;
int err = -ENOMEM;
int out_index;
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!parse_attr || !flow)
goto err_free;
flow->flags = flow_flags;
flow->cookie = f->cookie;
flow->priv = priv;
attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
if (!attr)
goto err_free;
flow->attr = attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
INIT_LIST_HEAD(&flow->attrs);
INIT_LIST_HEAD(&flow->peer_flows);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
*__flow = flow;
*__parse_attr = parse_attr;
return 0;
err_free:
kfree(flow);
kvfree(parse_attr);
return err;
}
static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct flow_cls_offload *f)
{
attr->parse_attr = parse_attr;
attr->chain = f->common.chain_index;
attr->prio = f->common.prio;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct flow_cls_offload *f,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
mlx5e_flow_attr_init(attr, parse_attr, f);
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
MLX5_COUNTER_SOURCE_ESWITCH)
esw_attr->counter_dev = in_mdev;
else
esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
int attr_size, err;
flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
attr_size = sizeof(struct mlx5_esw_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
if (err)
goto out;
parse_attr->filter_dev = filter_dev;
mlx5e_flow_esw_attr_init(flow->attr,
priv, parse_attr,
f, in_rep, in_mdev);
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
/* actions validation depends on parsing the ct matches first */
err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
&flow->attr->ct_attr, extack);
if (err)
goto err_free;
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
goto err_free;
add_unready_flow(flow);
}
return flow;
err_free:
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
unsigned long flow_flags,
struct mlx5_eswitch *peer_esw)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
int i = mlx5_get_dev_index(peer_esw->dev);
struct mlx5e_rep_priv *peer_urpriv;
struct mlx5e_tc_flow *peer_flow;
struct mlx5_core_dev *in_mdev;
int err = 0;
peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
peer_priv = netdev_priv(peer_urpriv->netdev);
/* in_mdev is chosen based on where the packet originated:
 * packets redirected to the uplink use the mdev of the original
 * flow, while packets redirected from the uplink use the peer mdev.
 * Multiport eswitch is a special case where the original mdev is kept.
 */
if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
parse_attr = flow->attr->parse_attr;
peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
parse_attr->filter_dev,
attr->in_rep, in_mdev);
if (IS_ERR(peer_flow)) {
err = PTR_ERR(peer_flow);
goto out;
}
list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
flow_flag_set(flow, DUP);
mutex_lock(&esw->offloads.peer_mutex);
list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
mutex_unlock(&esw->offloads.peer_mutex);
out:
return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev;
struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
int err;
flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev);
if (IS_ERR(flow))
return PTR_ERR(flow);
if (!is_peer_flow_needed(flow)) {
*__flow = flow;
return 0;
}
if (!mlx5_devcom_for_each_peer_begin(devcom)) {
err = -ENODEV;
goto clean_flow;
}
mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
if (err)
goto peer_clean;
}
mlx5_devcom_for_each_peer_end(devcom);
*__flow = flow;
return 0;
peer_clean:
mlx5e_tc_del_fdb_peers_flow(flow);
mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
mlx5e_tc_del_fdb_flow(priv, flow);
return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
int attr_size, err;
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
return -EOPNOTSUPP;
} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
return -EOPNOTSUPP;
}
flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
if (err)
goto out;
parse_attr->filter_dev = filter_dev;
mlx5e_flow_attr_init(flow->attr, parse_attr, f);
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
&flow->attr->ct_attr, extack);
if (err)
goto err_free;
err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
err = mlx5e_tc_add_nic_flow(priv, flow, extack);
if (err)
goto err_free;
flow_flag_set(flow, OFFLOADED);
*__flow = flow;
return 0;
err_free:
flow_flag_set(flow, FAILED);
mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
unsigned long flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned long flow_flags;
int err;
get_flags(flags, &flow_flags);
if (!tc_can_offload_extack(priv->netdev, f->common.extack))
return -EOPNOTSUPP;
if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
err = mlx5e_add_fdb_flow(priv, f, flow_flags,
filter_dev, flow);
else
err = mlx5e_add_nic_flow(priv, f, flow_flags,
filter_dev, flow);
return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
struct mlx5e_rep_priv *rpriv)
{
/* An offloaded flow rule is allowed to be duplicated on a non-uplink
 * representor sharing a tc block with other slaves of a lag device.
 * rpriv can be NULL if this function is called from NIC mode.
 */
return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
/* As IPsec and TC order is not aligned between software and hardware-offload,
* either IPsec offload or TC offload, not both, is allowed for a specific interface.
*/
static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
{
if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
return false;
if (filter != priv->netdev)
return false;
if (mlx5e_eswitch_vf_rep(priv->netdev))
return false;
return true;
}
static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
if (!is_tc_ipsec_order_check_needed(filter, priv))
return 0;
if (mdev->num_block_tc)
return -EBUSY;
mdev->num_block_ipsec++;
return 0;
}
static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
if (!is_tc_ipsec_order_check_needed(filter, priv))
return;
priv->mdev->num_block_ipsec--;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_tc_flow *flow;
int err = 0;
if (!mlx5_esw_hold(priv->mdev))
return -EBUSY;
err = mlx5e_tc_block_ipsec_offload(dev, priv);
if (err)
goto esw_release;
mlx5_esw_get(priv->mdev);
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (flow) {
/* Same flow rule offloaded to non-uplink representor sharing tc block,
* just return 0.
*/
if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
goto rcu_unlock;
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
netdev_warn_once(priv->netdev,
"flow cookie %lx already exists, ignoring\n",
f->cookie);
err = -EEXIST;
goto rcu_unlock;
}
rcu_unlock:
rcu_read_unlock();
if (flow)
goto out;
trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
/* Flow rule offloaded to non-uplink representor sharing tc block,
* set the flow's owner dev.
*/
if (is_flow_rule_duplicate_allowed(dev, rpriv))
flow->orig_dev = dev;
err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
mlx5_esw_release(priv->mdev);
return 0;
err_free:
mlx5e_flow_put(priv, flow);
out:
mlx5e_tc_unblock_ipsec_offload(dev, priv);
mlx5_esw_put(priv->mdev);
esw_release:
mlx5_esw_release(priv->mdev);
return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
return flow_flag_test(flow, INGRESS) == dir_ingress &&
flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err;
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
if (!flow || !same_flow_direction(flow, flags)) {
err = -EINVAL;
goto errout;
}
/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
* set.
*/
if (flow_flag_test_and_set(flow, DELETED)) {
err = -EINVAL;
goto errout;
}
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
rcu_read_unlock();
trace_mlx5e_delete_flower(f);
mlx5e_flow_put(priv, flow);
mlx5e_tc_unblock_ipsec_offload(dev, priv);
mlx5_esw_put(priv->mdev);
return 0;
errout:
rcu_read_unlock();
return err;
}
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act)
{
return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 lastuse = 0;
u64 packets = 0;
u64 bytes = 0;
int err = 0;
rcu_read_lock();
flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
tc_ht_params));
rcu_read_unlock();
if (IS_ERR(flow))
return PTR_ERR(flow);
if (!same_flow_direction(flow, flags)) {
err = -EINVAL;
goto errout;
}
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
} else {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
}
}
/* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded.
*/
if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
goto out;
if (flow_flag_test(flow, DUP)) {
struct mlx5e_tc_flow *peer_flow;
list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
u64 packets2;
u64 lastuse2;
u64 bytes2;
if (!flow_flag_test(peer_flow, OFFLOADED))
continue;
if (flow_flag_test(flow, USE_ACT_STATS)) {
f->use_act_stats = true;
break;
}
counter = mlx5e_tc_get_counter(peer_flow);
if (!counter)
goto no_peer_counter;
mlx5_fc_query_cached(counter, &bytes2, &packets2,
&lastuse2);
bytes += bytes2;
packets += packets2;
lastuse = max_t(u64, lastuse, lastuse2);
}
}
no_peer_counter:
if (esw)
mlx5_devcom_for_each_peer_end(esw->devcom);
out:
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
trace_mlx5e_stats_flower(f);
errout:
mlx5e_flow_put(priv, flow);
return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch *esw;
u32 rate_mbps = 0;
u16 vport_num;
int err;
vport_num = rpriv->rep->vport;
if (vport_num >= MLX5_VPORT_ECPF) {
NL_SET_ERR_MSG_MOD(extack,
"Ingress rate limit is supported only for Eswitch ports connected to VFs");
return -EOPNOTSUPP;
}
esw = priv->mdev->priv.eswitch;
/* rate is given in bytes/sec.
 * First convert to bits/sec, then round to the nearest mbit/sec
 * (mbit meaning million bits). If rate is non-zero, configure a
 * minimum of 1 mbit/sec.
 */
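/* e.g. rate = 250000 bytes/sec -> 2000000 bits/sec -> rate_mbps = 2;
 * any non-zero rate below 1 mbit/sec is clamped up to rate_mbps = 1.
 */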
if (rate) {
rate = (rate * BITS_PER_BYTE) + 500000;
do_div(rate, 1000000);
rate_mbps = max_t(u32, rate, 1);
}
err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
if (err)
NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
return err;
}
static int
tc_matchall_police_validate(const struct flow_action *action,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is not continue");
return -EOPNOTSUPP;
}
if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when exceed action is not drop");
return -EOPNOTSUPP;
}
if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
!flow_action_is_last_entry(action, act)) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when conform action is ok, but action is not last");
return -EOPNOTSUPP;
}
if (act->police.peakrate_bytes_ps ||
act->police.avrate || act->police.overhead) {
NL_SET_ERR_MSG_MOD(extack,
"Offload not supported when peakrate/avrate/overhead is configured");
return -EOPNOTSUPP;
}
return 0;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct flow_action_entry *act;
int err;
int i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
return -EINVAL;
}
if (!flow_offload_has_one_action(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
return -EOPNOTSUPP;
}
if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
}
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
err = tc_matchall_police_validate(flow_action, act, extack);
if (err)
return err;
err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
if (err)
return err;
rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
return -EOPNOTSUPP;
}
}
return 0;
}
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct netlink_ext_ack *extack = ma->common.extack;
if (ma->common.prio != 1) {
NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
return -EINVAL;
}
return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct netlink_ext_ack *extack = ma->common.extack;
return apply_police_params(priv, 0, extack);
}
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *ma)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct rtnl_link_stats64 cur_stats;
u64 dbytes;
u64 dpkts;
cur_stats = priv->stats.vf_vport;
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
struct mlx5e_hairpin_entry *hpe, *tmp;
LIST_HEAD(init_wait_list);
u16 peer_vhca_id;
int bkt;
if (!mlx5e_same_hw_devs(priv, peer_priv))
return;
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
mutex_lock(&tc->hairpin_tbl_lock);
hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
if (refcount_inc_not_zero(&hpe->refcnt))
list_add(&hpe->dead_peer_wait_list, &init_wait_list);
mutex_unlock(&tc->hairpin_tbl_lock);
list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
wait_for_completion(&hpe->res_ready);
if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe);
}
}
static int mlx5e_tc_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
struct mlx5e_priv *peer_priv;
struct mlx5e_tc_table *tc;
struct mlx5e_priv *priv;
if (ndev->netdev_ops != &mlx5e_netdev_ops ||
event != NETDEV_UNREGISTER ||
ndev->reg_state == NETREG_REGISTERED)
return NOTIFY_DONE;
tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
priv = tc->priv;
peer_priv = netdev_priv(ndev);
if (priv == peer_priv ||
!(priv->netdev->features & NETIF_F_HW_TC))
return NOTIFY_DONE;
mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
return NOTIFY_DONE;
}
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_flow_table **ft = &tc->miss_t;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
int err = 0;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_TC_MISS_LEVEL;
ft_attr.prio = 0;
ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(*ft)) {
err = PTR_ERR(*ft);
netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
}
return err;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
struct mlx5_core_dev *dev = priv->mdev;
struct mapping_ctx *chains_mapping;
struct mlx5_chains_attr attr = {};
u64 mapping_id;
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
mutex_init(&tc->t_lock);
mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
tc->priv = priv;
err = rhashtable_init(&tc->ht, &tc_ht_params);
if (err)
return err;
lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
mapping_id = mlx5_query_nic_system_image_guid(dev);
chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
if (IS_ERR(chains_mapping)) {
err = PTR_ERR(chains_mapping);
goto err_mapping;
}
tc->mapping = chains_mapping;
err = mlx5e_tc_nic_create_miss_table(priv);
if (err)
goto err_chains;
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping;
attr.fs_base_prio = MLX5E_TC_PRIO;
tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) {
err = PTR_ERR(tc->chains);
goto err_miss;
}
mlx5_chains_print_info(tc->chains);
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
&tc->netdevice_nn);
if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
goto err_reg;
}
mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
tc->action_stats_handle = mlx5e_tc_act_stats_create();
if (IS_ERR(tc->action_stats_handle)) {
err = PTR_ERR(tc->action_stats_handle);
goto err_act_stats;
}
return 0;
err_act_stats:
unregister_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
&tc->netdevice_nn);
err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
mlx5_chains_destroy(tc->chains);
err_miss:
mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
mapping_destroy(chains_mapping);
err_mapping:
rhashtable_destroy(&tc->ht);
return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = flow->priv;
mlx5e_tc_del_flow(priv, flow);
kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
debugfs_remove_recursive(tc->dfs_root);
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
&tc->netdevice_nn);
mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
mutex_destroy(&tc->hairpin_tbl_lock);
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
tc->t = NULL;
}
mutex_destroy(&tc->t_lock);
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
mlx5e_tc_nic_destroy_miss_table(priv);
mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
int err;
err = rhashtable_init(tc_ht, &tc_ht_params);
if (err)
return err;
lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
return 0;
}
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct netdev_phys_item_id ppid;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
u64 mapping_id, key;
int err = 0;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
MLX5_FLOW_NAMESPACE_FDB);
uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
esw_chains(esw),
&esw->offloads.mod_hdr,
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
sizeof(struct tunnel_match_key),
TUNNEL_INFO_BITS_MASK, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_tun_mapping;
}
uplink_priv->tunnel_mapping = mapping;
/* The last two values are reserved for the stack devices slow path table
 * mark and the bridge ingress push mark.
 */
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
}
uplink_priv->tunnel_enc_opts_mapping = mapping;
uplink_priv->encap = mlx5e_tc_tun_init(priv);
if (IS_ERR(uplink_priv->encap)) {
err = PTR_ERR(uplink_priv->encap);
goto err_register_fib_notifier;
}
uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
if (IS_ERR(uplink_priv->action_stats_handle)) {
err = PTR_ERR(uplink_priv->action_stats_handle);
goto err_action_counter;
}
err = dev_get_port_parent_id(priv->netdev, &ppid, false);
if (!err) {
memcpy(&key, &ppid.id, sizeof(key));
mlx5_esw_offloads_devcom_init(esw, key);
}
return 0;
err_action_counter:
mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
mlx5_esw_offloads_devcom_cleanup(esw);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
struct mlx5e_tc_flow *flow, *tmp;
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++) {
if (i == mlx5_get_dev_index(esw->dev))
continue;
list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
mlx5e_tc_del_fdb_peers_flow(flow);
}
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
struct mlx5_rep_uplink_priv *rpriv =
container_of(work, struct mlx5_rep_uplink_priv,
reoffload_flows_work);
struct mlx5e_tc_flow *flow, *tmp;
mutex_lock(&rpriv->unready_flows_lock);
list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
unready_flow_del(flow);
}
mutex_unlock(&rpriv->unready_flows_lock);
}
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct flow_cls_offload *cls_flower,
unsigned long flags)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
flags);
case FLOW_CLS_DESTROY:
return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
flags);
case FLOW_CLS_STATS:
return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
flags);
default:
return -EOPNOTSUPP;
}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
unsigned long flags = MLX5_TC_FLAG(INGRESS);
struct mlx5e_priv *priv = cb_priv;
if (!priv->netdev || !netif_device_present(priv->netdev))
return -EOPNOTSUPP;
if (mlx5e_is_uplink_rep(priv))
flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
else
flags |= MLX5_TC_FLAG(NIC_OFFLOAD);
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
default:
return -EOPNOTSUPP;
}
}
static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5e_tc_update_priv *tc_priv,
u32 tunnel_id)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
struct tunnel_match_key key;
u32 tun_id, enc_opts_id;
struct net_device *dev;
int err;
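/* tunnel_id packs two mapping ids: the low ENC_OPTS_BITS carry the encap
 * options mapping id and the remaining high bits carry the tunnel key
 * mapping id.
 */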
enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
tun_id = tunnel_id >> ENC_OPTS_BITS;
if (!tun_id)
return true;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find tunnel for tun_id: %d, err: %d\n",
tun_id, err);
return false;
}
if (enc_opts_id) {
err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
enc_opts_id, &enc_opts);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
enc_opts_id, err);
return false;
}
}
switch (key.enc_control.addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, TUNNEL_KEY,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, 0, TUNNEL_KEY,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
default:
netdev_dbg(priv->netdev,
"Couldn't restore tunnel, unsupported addr_type: %d\n",
key.enc_control.addr_type);
return false;
}
if (!tun_dst) {
netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
enc_opts.key.data,
enc_opts.key.len,
enc_opts.key.dst_opt_type);
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);
if (!dev) {
netdev_dbg(priv->netdev,
"Couldn't find tunnel device with ifindex: %d\n",
key.filter_ifindex);
return false;
}
/* Set fwd_dev so we do dev_put() after datapath */
tc_priv->fwd_dev = dev;
skb->dev = dev;
return true;
}
static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
struct tc_skb_ext *tc_skb_ext;
u64 act_miss_cookie;
u32 chain;
chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
mapped_obj->act_miss_cookie : 0;
if (chain || act_miss_cookie) {
if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
return false;
tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
}
if (act_miss_cookie) {
tc_skb_ext->act_miss_cookie = act_miss_cookie;
tc_skb_ext->act_miss = 1;
} else {
tc_skb_ext->chain = chain;
}
}
if (tc_priv)
return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
return true;
}
static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_mapped_obj *mapped_obj,
struct mlx5e_tc_update_priv *tc_priv)
{
if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
netdev_dbg(priv->netdev,
"Failed to restore tunnel info for sampled packet\n");
return;
}
mlx5e_tc_sample_skb(skb, mapped_obj);
}
static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_mapped_obj *mapped_obj,
struct mlx5e_tc_update_priv *tc_priv,
u32 tunnel_id)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
bool forward_tx = false;
/* Tunnel restore takes precedence over int port restore */
if (tunnel_id)
return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
mapped_obj->int_port_metadata, &forward_tx)) {
/* Set fwd_dev for future dev_put */
tc_priv->fwd_dev = skb->dev;
tc_priv->forward_tx = forward_tx;
return true;
}
return false;
}
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
struct mlx5_tc_ct_priv *ct_priv,
u32 zone_restore_id, u32 tunnel_id,
struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
struct mlx5_mapped_obj mapped_obj;
int err;
err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
if (err) {
netdev_dbg(skb->dev,
"Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
mapped_obj_id, err);
return false;
}
switch (mapped_obj.type) {
case MLX5_MAPPED_OBJ_CHAIN:
case MLX5_MAPPED_OBJ_ACT_MISS:
return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
tunnel_id, tc_priv);
case MLX5_MAPPED_OBJ_SAMPLE:
mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
tc_priv->skb_done = true;
return true;
case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
default:
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
return false;
}
return false;
}
bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
u32 mapped_obj_id, reg_b, zone_restore_id;
struct mlx5_tc_ct_priv *ct_priv;
struct mapping_ctx *mapping_ctx;
struct mlx5e_tc_table *tc;
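/* In NIC mode the CQE ft_metadata carries the restore register: the low
 * chain-tag bits hold the mapped object id and the bits at the
 * NIC_ZONE_RESTORE offset hold the CT zone restore id.
 */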
reg_b = be32_to_cpu(cqe->ft_metadata);
tc = mlx5e_fs_get_tc(priv->fs);
mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
ct_priv = tc->ct;
mapping_ctx = tc->mapping;
return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
0, NULL);
}
static struct mapping_ctx *
mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc;
struct mlx5_eswitch *esw;
struct mapping_ctx *ctx;
if (is_mdev_switchdev_mode(priv->mdev)) {
esw = priv->mdev->priv.eswitch;
ctx = esw->offloads.reg_c0_obj_pool;
} else {
tc = mlx5e_fs_get_tc(priv->fs);
ctx = tc->mapping;
}
return ctx;
}
int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u64 act_miss_cookie, u32 *act_miss_mapping)
{
struct mlx5_mapped_obj mapped_obj = {};
struct mlx5_eswitch *esw;
struct mapping_ctx *ctx;
int err;
ctx = mlx5e_get_priv_obj_mapping(priv);
mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
mapped_obj.act_miss_cookie = act_miss_cookie;
err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
if (err)
return err;
if (!is_mdev_switchdev_mode(priv->mdev))
return 0;
esw = priv->mdev->priv.eswitch;
attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
if (IS_ERR(attr->act_id_restore_rule)) {
err = PTR_ERR(attr->act_id_restore_rule);
goto err_rule;
}
return 0;
err_rule:
mapping_remove(ctx, *act_miss_mapping);
return err;
}
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
u32 act_miss_mapping)
{
struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
if (is_mdev_switchdev_mode(priv->mdev))
mlx5_del_flow_rules(attr->act_id_restore_rule);
mapping_remove(ctx, act_miss_mapping);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
|
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
/* Max supported cable length is 1000 meters */
#define MLX5E_MAX_CABLE_LENGTH 1000
enum {
MLX5E_VENDOR_TC_GROUP_NUM = 7,
MLX5E_LOWEST_PRIO_GROUP = 0,
};
enum {
MLX5_DCB_CHG_RESET,
MLX5_DCB_NO_CHG,
MLX5_DCB_CHG_NO_RESET,
};
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
MLX5_CAP_QCAM_REG(mdev, qpts) && \
MLX5_CAP_QCAM_REG(mdev, qpdpm))
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode mode)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 param[MLX5_ST_SZ_DW(dcbx_param)];
int err;
err = mlx5_query_port_dcbx_param(mdev, param);
if (err)
return err;
MLX5_SET(dcbx_param, param, version_admin, mode);
if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
MLX5_SET(dcbx_param, param, willing_admin, 1);
return mlx5_set_port_dcbx_param(mdev, param);
}
static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, dcbx))
return 0;
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
return 0;
err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
if (err)
return err;
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
return 0;
}
static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
bool is_tc_group_6_exist = false;
bool is_zero_bw_ets_tc = false;
int err = 0;
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
}
ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
if (err)
return err;
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
is_zero_bw_ets_tc = true;
if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
is_tc_group_6_exist = true;
}
/* Report 0% ets tc if it exists */
if (is_zero_bw_ets_tc) {
for (i = 0; i < ets->ets_cap; i++)
if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
ets->tc_tx_bw[i] = 0;
}
/* Update tc_tsa based on fw setting */
for (i = 0; i < ets->ets_cap; i++) {
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
!is_tc_group_6_exist)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
bool ets_zero_bw = false;
int strict_group;
int i;
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
any_tc_mapped_to_ets = true;
if (!ets->tc_tx_bw[i])
ets_zero_bw = true;
}
}
/* strict group has higher priority than ets group */
strict_group = MLX5E_LOWEST_PRIO_GROUP;
if (any_tc_mapped_to_ets)
strict_group++;
if (ets_zero_bw)
strict_group++;
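/* Group assignment: ETS TCs share the lowest group (split into two groups
* when a zero-BW ETS TC exists), each strict TC gets its own group above
* them, and vendor TCs go to group 7.
*/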
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
break;
case IEEE_8021QAZ_TSA_STRICT:
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
if (ets->tc_tx_bw[i] && ets_zero_bw)
tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
break;
}
}
}
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
int bw_for_ets_zero_bw_tc = 0;
int last_ets_zero_bw_tc = -1;
int num_ets_zero_bw = 0;
int i;
for (i = 0; i <= max_tc; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
!ets->tc_tx_bw[i]) {
num_ets_zero_bw++;
last_ets_zero_bw_tc = i;
}
}
if (num_ets_zero_bw)
bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_STRICT:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
tc_tx_bw[i] = ets->tc_tx_bw[i] ?
ets->tc_tx_bw[i] :
bw_for_ets_zero_bw_tc;
break;
}
}
/* Make sure the total bw for ets zero bw group is 100% */
if (last_ets_zero_bw_tc != -1)
tc_tx_bw[last_ets_zero_bw_tc] +=
MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
/* If there are ETS BW 0,
* Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
* Set group #0 to all the ETS BW 0 tcs and
* equally splits the 100% BW between them
* Report both group #0 and #1 as ETS type.
* All the tcs in group #0 will be reported with 0% BW.
*/
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
int err, i;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
if (err)
return err;
err = mlx5_set_port_tc_group(mdev, tc_group);
if (err)
return err;
err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
if (err)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
__func__, i, ets->prio_tc[i]);
netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
__func__, i, tc_tx_bw[i], tc_group[i]);
}
return err;
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
struct ieee_ets *ets,
bool zero_sum_allowed)
{
bool have_ets_tc = false;
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
netdev_err(netdev,
"Failed to validate ETS: priority value greater than max(%d)\n",
MLX5E_MAX_PRIORITY);
return -EINVAL;
}
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
have_ets_tc = true;
bw_sum += ets->tc_tx_bw[i];
}
}
if (have_ets_tc && bw_sum != 100) {
if (bw_sum || (!bw_sum && !zero_sum_allowed))
netdev_err(netdev,
"Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
}
static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct ieee_ets *ets)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
if (err)
return err;
err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
if (err)
return err;
return 0;
}
static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
int i;
pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
}
if (MLX5_BUFFER_SUPPORTED(mdev))
pfc->delay = priv->dcbx.cable_len;
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;
struct ieee_pfc pfc_new;
u32 changed = 0;
u8 curr_pfc_en;
int ret = 0;
/* pfc_en */
mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
if (pfc->pfc_en != curr_pfc_en) {
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
if (ret)
return ret;
mlx5_toggle_port_link(mdev);
changed |= MLX5E_PORT_BUFFER_PFC;
}
if (pfc->delay &&
pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
pfc->delay != priv->dcbx.cable_len) {
priv->dcbx.cable_len = pfc->delay;
changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
}
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
if (priv->dcbx.manual_buffer)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);
if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
priv->dcbx.cable_len = old_cable_len;
}
if (!ret) {
netdev_dbg(dev,
"%s: PFC per priority bit mask: 0x%x\n",
__func__, pfc->pfc_en);
}
return ret;
}
static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return priv->dcbx.cap;
}
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;
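/* A non-zero return rejects the requested mode; LLD-managed DCBX is not
* supported.
*/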
if (mode & DCB_CAP_DCBX_LLD_MANAGED)
return 1;
if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
/* set dcbx to fw controlled */
if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
dcbx->cap &= ~DCB_CAP_DCBX_HOST;
return 0;
}
return 1;
}
if (!(mode & DCB_CAP_DCBX_HOST))
return 1;
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
dcbx->cap = mode;
return 0;
}
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct dcb_app temp;
bool is_new;
int err;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EOPNOTSUPP;
if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
(app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Save the old entry info */
temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
temp.protocol = app->protocol;
temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
/* Check if need to switch to dscp trust state */
if (!priv->dcbx.dscp_app_cnt) {
err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
if (err)
return err;
}
/* Skip the fw command if new and old mapping are the same */
if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
if (err)
goto fw_err;
}
/* Delete the old entry if exists */
is_new = false;
err = dcb_ieee_delapp(dev, &temp);
if (err)
is_new = true;
/* Add new entry and update counter */
err = dcb_ieee_setapp(dev, app);
if (err)
return err;
if (is_new)
priv->dcbx.dscp_app_cnt++;
return err;
fw_err:
mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
}
static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
!MLX5_DSCP_SUPPORTED(priv->mdev))
return -EOPNOTSUPP;
if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
(app->protocol >= MLX5E_MAX_DSCP))
return -EINVAL;
/* Skip if no dscp app entry */
if (!priv->dcbx.dscp_app_cnt)
return -ENOENT;
/* Check if the entry matches fw setting */
if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
return -ENOENT;
/* Delete the app entry */
err = dcb_ieee_delapp(dev, app);
if (err)
return err;
/* Reset the priority mapping back to zero */
err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
if (err)
goto fw_err;
priv->dcbx.dscp_app_cnt--;
/* Check if need to switch to pcp trust state */
if (!priv->dcbx.dscp_app_cnt)
err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
fw_err:
mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
return err;
}
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
int err;
int i;
err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
if (err)
return err;
memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
switch (max_bw_unit[i]) {
case MLX5_100_MBPS_UNIT:
maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
break;
case MLX5_GBPS_UNIT:
maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
break;
case MLX5_BW_NO_LIMIT:
break;
default:
WARN(true, "non-supported BW unit");
break;
}
}
return 0;
}
static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
int i;
memset(max_bw_value, 0, sizeof(max_bw_value));
memset(max_bw_unit, 0, sizeof(max_bw_unit));
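/* Rates that fit in 255 units of 100 Mbps are programmed in 100 Mbps
* granularity; anything larger falls back to 1 Gbps granularity.
*/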
for (i = 0; i <= mlx5_max_tc(mdev); i++) {
if (!maxrate->tc_maxrate[i]) {
max_bw_unit[i] = MLX5_BW_NO_LIMIT;
continue;
}
if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
} else {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;
}
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
__func__, i, max_bw_value[i]);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
struct ieee_pfc pfc;
int err = -EOPNOTSUPP;
int i;
if (!MLX5_CAP_GEN(mdev, ets))
goto out;
memset(&ets, 0, sizeof(ets));
memset(&pfc, 0, sizeof(pfc));
ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
netdev_dbg(netdev,
"%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
__func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
if (err)
goto out;
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err) {
netdev_err(netdev,
"%s, Failed to set ETS: %d\n", __func__, err);
goto out;
}
/* Set PFC */
pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
if (!cee_cfg->pfc_enable)
pfc.pfc_en = 0;
else
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
if (err) {
netdev_err(netdev,
"%s, Failed to set PFC: %d\n", __func__, err);
goto out;
}
out:
return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}
static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
return MLX5E_CEE_STATE_UP;
}
static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
u8 *perm_addr)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!perm_addr)
return;
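/* Initialize to all ones before querying the MAC address from the device */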
memset(perm_addr, 0xff, MAX_ADDR_LEN);
mlx5_query_mac_address(priv->mdev, perm_addr);
}
static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
int priority, u8 prio_type,
u8 pgid, u8 bw_pct, u8 up_map)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
return;
}
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
"%s, priority group is out of range\n", __func__);
return;
}
cee_cfg->prio_to_pg_map[priority] = pgid;
}
static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 bw_pct)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
"%s, priority group is out of range\n", __func__);
return;
}
cee_cfg->pg_bw_pct[pgid] = bw_pct;
}
static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
int priority, u8 *prio_type,
u8 *pgid, u8 *bw_pct, u8 *up_map)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_GEN(priv->mdev, ets)) {
netdev_err(netdev, "%s, ets is not supported\n", __func__);
return;
}
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
return;
}
*prio_type = 0;
*bw_pct = 0;
*up_map = 0;
if (mlx5_query_port_prio_tc(mdev, priority, pgid))
*pgid = 0;
}
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct ieee_ets ets;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
"%s, priority group is out of range\n", __func__);
return;
}
mlx5e_dcbnl_ieee_getets(netdev, &ets);
*bw_pct = ets.tc_tx_bw[pgid];
}
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
int priority, u8 setting)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
return;
}
if (setting > 1)
return;
cee_cfg->pfc_setting[priority] = setting;
}
static int
mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
int priority, u8 *setting)
{
struct ieee_pfc pfc;
int err;
err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
if (err)
*setting = 0;
else
*setting = (pfc.pfc_en >> priority) & 0x01;
return err;
}
static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
int priority, u8 *setting)
{
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
return;
}
if (!setting)
return;
mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
}
static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
int capid, u8 *cap)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 rval = 0;
switch (capid) {
case DCB_CAP_ATTR_PG:
*cap = true;
break;
case DCB_CAP_ATTR_PFC:
*cap = true;
break;
case DCB_CAP_ATTR_UP2TC:
*cap = false;
break;
case DCB_CAP_ATTR_PG_TCS:
*cap = 1 << mlx5_max_tc(mdev);
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 1 << mlx5_max_tc(mdev);
break;
case DCB_CAP_ATTR_GSP:
*cap = false;
break;
case DCB_CAP_ATTR_BCN:
*cap = false;
break;
case DCB_CAP_ATTR_DCBX:
*cap = priv->dcbx.cap |
DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_VER_IEEE;
break;
default:
*cap = 0;
rval = 1;
break;
}
return rval;
}
static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
int tcs_id, u8 *num)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
switch (tcs_id) {
case DCB_NUMTCS_ATTR_PG:
case DCB_NUMTCS_ATTR_PFC:
*num = mlx5_max_tc(mdev) + 1;
break;
default:
return -EINVAL;
}
return 0;
}
static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
{
struct ieee_pfc pfc;
if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
return MLX5E_CEE_STATE_DOWN;
return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
}
static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
return;
cee_cfg->pfc_enable = state;
}
static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
struct dcbnl_buffer *dcb_buffer)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_port_buffer port_buffer;
u8 buffer[MLX5E_MAX_PRIORITY];
int i, err;
if (!MLX5_BUFFER_SUPPORTED(mdev))
return -EOPNOTSUPP;
err = mlx5e_port_query_priority2buffer(mdev, buffer);
if (err)
return err;
for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
dcb_buffer->prio2buffer[i] = buffer[i];
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
return err;
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
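/* Report only the network buffer space; internal buffers are excluded
* from the total.
*/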
dcb_buffer->total_size = port_buffer.port_buffer_size -
port_buffer.internal_buffers_size;
return 0;
}
static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
struct dcbnl_buffer *dcb_buffer)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_port_buffer port_buffer;
u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
u32 *buffer_size = NULL;
u8 *prio2buffer = NULL;
u32 changed = 0;
int i, err;
if (!MLX5_BUFFER_SUPPORTED(mdev))
return -EOPNOTSUPP;
for (i = 0; i < DCBX_MAX_BUFFERS; i++)
mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
if (err)
return err;
for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
prio2buffer = dcb_buffer->prio2buffer;
break;
}
}
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
return err;
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
changed |= MLX5E_PORT_BUFFER_SIZE;
buffer_size = dcb_buffer->buffer_size;
break;
}
}
if (!changed)
return 0;
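/* Record that the buffers are user-configured; mlx5e_dcbnl_ieee_setpfc()
* only reprograms them in that case.
*/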
priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;
}
static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_getets = mlx5e_dcbnl_ieee_getets,
.ieee_setets = mlx5e_dcbnl_ieee_setets,
.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
.ieee_setapp = mlx5e_dcbnl_ieee_setapp,
.ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,
/* CEE interfaces */
.setall = mlx5e_dcbnl_setall,
.getstate = mlx5e_dcbnl_getstate,
.getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
.setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
.setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
.getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
.getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
.setpfccfg = mlx5e_dcbnl_setpfccfg,
.getpfccfg = mlx5e_dcbnl_getpfccfg,
.getcap = mlx5e_dcbnl_getcap,
.getnumtcs = mlx5e_dcbnl_getnumtcs,
.getpfcstate = mlx5e_dcbnl_getpfcstate,
.setpfcstate = mlx5e_dcbnl_setpfcstate,
};
void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}
static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
enum mlx5_dcbx_oper_mode *mode)
{
u32 out[MLX5_ST_SZ_DW(dcbx_param)];
*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
if (!mlx5_query_port_dcbx_param(priv->mdev, out))
*mode = MLX5_GET(dcbx_param, out, version_oper);
/* From driver's point of view, we only care if the mode
* is host (HOST) or non-host (AUTO)
*/
if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
struct ieee_ets ets;
int err;
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
return;
memset(&ets, 0, sizeof(ets));
ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets.ets_cap; i++) {
ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
ets.prio_tc[i] = i;
}
if (ets.ets_cap > 1) {
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
}
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err)
netdev_err(priv->netdev,
"%s, Failed to init ETS: %d\n", __func__, err);
}
enum {
INIT,
DELETE,
};
static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
struct dcb_app temp;
int i;
if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
return;
if (!MLX5_DSCP_SUPPORTED(priv->mdev))
return;
/* No SEL_DSCP entry in non DSCP state */
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
return;
temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
for (i = 0; i < MLX5E_MAX_DSCP; i++) {
temp.protocol = i;
temp.priority = priv->dcbx_dp.dscp2prio[i];
if (action == INIT)
dcb_ieee_setapp(priv->netdev, &temp);
else
dcb_ieee_delapp(priv->netdev, &temp);
}
priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
mlx5e_dcbnl_dscp_app(priv, INIT);
}
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
mlx5e_dcbnl_dscp_app(priv, DELETE);
}
static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u8 trust_state)
{
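/* DSCP lives in the IP header, so DSCP trust requires at least IP-level
* inline mode on TX.
*/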
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
if (trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}
static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
{
u8 *trust_state = context;
int err;
err = mlx5_set_trust_state(priv->mdev, *trust_state);
if (err)
return err;
WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
return 0;
}
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
struct mlx5e_params new_params;
bool reset = true;
int err;
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
trust_state);
/* Skip if tx_min_inline is the same */
if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
reset = false;
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_update_trust_state_hw,
&trust_state, reset);
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
{
int err;
err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
if (err)
return err;
priv->dcbx_dp.dscp2prio[dscp] = prio;
return err;
}
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 trust_state;
int err;
if (!MLX5_DSCP_SUPPORTED(mdev)) {
WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
return 0;
}
err = mlx5_query_trust_state(priv->mdev, &trust_state);
if (err)
return err;
WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
/*
* Align the driver state with the register state.
* Temporary state change is required to enable the app list reset.
*/
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
mlx5e_dcbnl_delete_app(priv);
priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
}
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
if (err)
return err;
return 0;
}
#define MLX5E_BUFFER_CELL_SHIFT 7
static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
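/* Fall back to the default 128-byte cell size when the SBCAM register is
* unsupported or the query fails.
*/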
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
return (1 << MLX5E_BUFFER_CELL_SHIFT);
if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
MLX5_REG_SBCAM, 0, 0))
return (1 << MLX5E_BUFFER_CELL_SHIFT);
return MLX5_GET(sbcam_reg, out, cap_cell_size);
}
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
mlx5e_trust_initialize(priv);
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
if (MLX5_CAP_GEN(priv->mdev, dcbx))
mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
DCB_CAP_DCBX_VER_IEEE;
if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
|
/*
* Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/macsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
u16 umr_wqebbs, max_wqebbs;
bool striding_rq_umr;
striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
MLX5_CAP_ETH(mdev, reg_umr_sq);
if (!striding_rq_umr)
return false;
umr_wqebbs = mlx5e_mpwrq_umr_wqebbs(mdev, page_shift, umr_mode);
max_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
/* Sanity check; should never happen, because mlx5e_mpwrq_umr_wqebbs is
* calculated from mlx5e_get_max_sq_aligned_wqebbs.
*/
if (WARN_ON(umr_wqebbs > max_wqebbs))
return false;
return true;
}
void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
bool up;
port_state = mlx5_query_vport_state(mdev,
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
up = port_state == VPORT_STATE_UP;
if (up == netif_carrier_ok(priv->netdev))
netif_carrier_event(priv->netdev);
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
}
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_carrier_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
mutex_unlock(&priv->state_lock);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
priv->profile->update_stats(priv);
mutex_unlock(&priv->state_lock);
}
void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
{
if (!priv->profile->update_stats)
return;
if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state)))
return;
queue_work(priv->wq, &priv->update_stats_work);
}
static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
struct mlx5_eqe *eqe = data;
if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
return NOTIFY_DONE;
switch (eqe->sub_type) {
case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
queue_work(priv->wq, &priv->update_carrier_work);
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
priv->events_nb.notifier_call = async_event;
mlx5_notifier_register(priv->mdev, &priv->events_nb);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
int err;
switch (event) {
case MLX5_DRIVER_EVENT_TYPE_TRAP:
err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
if (err) {
trap_event_ctx->err = err;
return NOTIFY_BAD;
}
break;
default:
return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
{
priv->blocking_events_nb.notifier_call = blocking_event;
mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
}
static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
{
mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_mode)
{
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
u32 sz;
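/* Align the translation entry list to the UMR flex alignment and express
* its size in 16-byte octwords.
*/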
sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
return sz / MLX5_OCTWORD;
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
u16 octowords;
u8 ds_cnt;
ds_cnt = DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(rq->mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode),
MLX5_SEND_WQE_DS);
cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
ds_cnt);
cseg->umr_mkey = rq->mpwqe.umr_mkey_be;
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
octowords = mlx5e_mpwrq_umr_octowords(rq->mpwqe.pages_per_wqe, rq->mpwqe.umr_mode);
ucseg->xlt_octowords = cpu_to_be16(octowords);
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
{
rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
GFP_KERNEL, node);
if (!rq->mpwqe.shampo)
return -ENOMEM;
return 0;
}
static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
{
kvfree(rq->mpwqe.shampo);
}
static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
node);
shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
sizeof(*shampo->info)),
GFP_KERNEL, node);
shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
sizeof(*shampo->pages)),
GFP_KERNEL, node);
if (!shampo->bitmap || !shampo->info || !shampo->pages)
goto err_nomem;
return 0;
err_nomem:
kvfree(shampo->info);
kvfree(shampo->bitmap);
kvfree(shampo->pages);
return -ENOMEM;
}
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
kvfree(rq->mpwqe.shampo->bitmap);
kvfree(rq->mpwqe.shampo->info);
kvfree(rq->mpwqe.shampo->pages);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
size_t alloc_size;
alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info,
alloc_units.frag_pages,
rq->mpwqe.pages_per_wqe));
rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
if (!rq->mpwqe.info)
return -ENOMEM;
/* For deferred page release (release right before alloc), make sure
* that on first round release is not called.
*/
for (int i = 0; i < wq_sz; i++) {
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, i);
bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
}
mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe);
return 0;
}
static u8 mlx5e_mpwrq_access_mode(enum mlx5e_mpwrq_umr_mode umr_mode)
{
switch (umr_mode) {
case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
return MLX5_MKC_ACCESS_MODE_MTT;
case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
return MLX5_MKC_ACCESS_MODE_KSM;
case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
return MLX5_MKC_ACCESS_MODE_KLMS;
case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
return MLX5_MKC_ACCESS_MODE_KSM;
}
WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", umr_mode);
return 0;
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
u32 npages, u8 page_shift, u32 *umr_mkey,
dma_addr_t filler_addr,
enum mlx5e_mpwrq_umr_mode umr_mode,
u32 xsk_chunk_size)
{
struct mlx5_mtt *mtt;
struct mlx5_ksm *ksm;
struct mlx5_klm *klm;
u32 octwords;
int inlen;
void *mkc;
u32 *in;
int err;
int i;
if ((umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED ||
umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE) &&
!MLX5_CAP_GEN(mdev, fixed_buffer_size)) {
mlx5_core_warn(mdev, "Unaligned AF_XDP requires fixed_buffer_size capability\n");
return -EINVAL;
}
octwords = mlx5e_mpwrq_umr_octowords(npages, umr_mode);
inlen = MLX5_FLEXIBLE_INLEN(mdev, MLX5_ST_SZ_BYTES(create_mkey_in),
MLX5_OCTWORD, octwords);
if (inlen < 0)
return inlen;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, mlx5e_mpwrq_access_mode(umr_mode));
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET64(mkc, mkc, len, npages << page_shift);
MLX5_SET(mkc, mkc, translations_octword_size, octwords);
if (umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)
MLX5_SET(mkc, mkc, log_page_size, page_shift - 2);
else if (umr_mode != MLX5E_MPWRQ_UMR_MODE_OVERSIZED)
MLX5_SET(mkc, mkc, log_page_size, page_shift);
MLX5_SET(create_mkey_in, in, translations_octword_actual_size, octwords);
/* Initialize the mkey with all MTTs pointing to a default
* page (filler_addr). When the channels are activated, UMR
* WQEs will redirect the RX WQEs to the actual memory from
* the RQ's pool, while the gaps (wqe_overflow) remain mapped
* to the default page.
*/
switch (umr_mode) {
case MLX5E_MPWRQ_UMR_MODE_OVERSIZED:
klm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
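/* Two KLM entries per page: the first covers xsk_chunk_size bytes, the
* second the remainder of the page.
*/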
for (i = 0; i < npages; i++) {
klm[i << 1] = (struct mlx5_klm) {
.va = cpu_to_be64(filler_addr),
.bcount = cpu_to_be32(xsk_chunk_size),
.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
};
klm[(i << 1) + 1] = (struct mlx5_klm) {
.va = cpu_to_be64(filler_addr),
.bcount = cpu_to_be32((1 << page_shift) - xsk_chunk_size),
.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
};
}
break;
case MLX5E_MPWRQ_UMR_MODE_UNALIGNED:
ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0; i < npages; i++)
ksm[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
.va = cpu_to_be64(filler_addr),
};
break;
case MLX5E_MPWRQ_UMR_MODE_ALIGNED:
mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
for (i = 0; i < npages; i++)
mtt[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(filler_addr),
};
break;
case MLX5E_MPWRQ_UMR_MODE_TRIPLE:
ksm = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
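/* Four KSM entries per page; log_page_size was set to page_shift - 2
* above, so each entry maps a quarter of a page.
*/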
for (i = 0; i < npages * 4; i++) {
ksm[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey),
.va = cpu_to_be64(filler_addr),
};
}
break;
}
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
return err;
}
static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
u64 nentries,
u32 *umr_mkey)
{
int inlen;
void *mkc;
u32 *in;
int err;
inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET(mkc, mkc, translations_octword_size, nentries);
MLX5_SET(mkc, mkc, length64, 1);
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
return err;
}
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;
u32 wq_size = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
u32 num_entries, max_num_entries;
u32 umr_mkey;
int err;
max_num_entries = mlx5e_mpwrq_max_num_entries(mdev, rq->mpwqe.umr_mode);
/* Shouldn't overflow, the result is at most MLX5E_MAX_RQ_NUM_MTTS. */
if (WARN_ON_ONCE(check_mul_overflow(wq_size, (u32)rq->mpwqe.mtts_per_wqe,
&num_entries) ||
num_entries > max_num_entries))
mlx5_core_err(mdev, "%s: multiplication overflow: %u * %u > %u\n",
__func__, wq_size, rq->mpwqe.mtts_per_wqe,
max_num_entries);
err = mlx5e_create_umr_mkey(mdev, num_entries, rq->mpwqe.page_shift,
&umr_mkey, rq->wqe_overflow.addr,
rq->mpwqe.umr_mode, xsk_chunk_size);
rq->mpwqe.umr_mkey_be = cpu_to_be32(umr_mkey);
return err;
}
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq)
{
u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
max_klm_size, rq->mpwqe.shampo->hd_per_wq);
return -EINVAL;
}
return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
&rq->mpwqe.shampo->mkey);
}
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
struct mlx5e_wqe_frag_info next_frag = {};
struct mlx5e_wqe_frag_info *prev = NULL;
int i;
WARN_ON(rq->xsk_pool);
next_frag.frag_page = &rq->wqe.alloc_units->frag_pages[0];
/* Skip first release due to deferred release. */
next_frag.flags = BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *frag =
&rq->wqe.frags[i << rq->wqe.info.log_num_frags];
int f;
for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
/* Pages are assigned at runtime. */
next_frag.frag_page++;
next_frag.offset = 0;
if (prev)
prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
}
*frag = next_frag;
/* prepare next */
next_frag.offset += frag_info[f].frag_stride;
prev = frag;
}
}
if (prev)
prev->flags |= BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE);
}
static void mlx5e_init_xsk_buffs(struct mlx5e_rq *rq)
{
int i;
/* Assumptions used by XSK batched allocator. */
WARN_ON(rq->wqe.info.num_frags != 1);
WARN_ON(rq->wqe.info.log_num_frags != 0);
WARN_ON(rq->wqe.info.arr[0].frag_stride != PAGE_SIZE);
/* Considering the above assumptions a fragment maps to a single
* xsk_buff.
*/
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
rq->wqe.frags[i].xskp = &rq->wqe.alloc_units->xsk_buffs[i];
/* Skip first release due to deferred release as WQES are
* not allocated yet.
*/
rq->wqe.frags[i].flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
}
}
static int mlx5e_init_wqe_alloc_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
int len = wq_sz << rq->wqe.info.log_num_frags;
struct mlx5e_wqe_frag_info *frags;
union mlx5e_alloc_units *aus;
int aus_sz;
if (rq->xsk_pool)
aus_sz = sizeof(*aus->xsk_buffs);
else
aus_sz = sizeof(*aus->frag_pages);
aus = kvzalloc_node(array_size(len, aus_sz), GFP_KERNEL, node);
if (!aus)
return -ENOMEM;
frags = kvzalloc_node(array_size(len, sizeof(*frags)), GFP_KERNEL, node);
if (!frags) {
kvfree(aus);
return -ENOMEM;
}
rq->wqe.alloc_units = aus;
rq->wqe.frags = frags;
if (rq->xsk_pool)
mlx5e_init_xsk_buffs(rq);
else
mlx5e_init_frags_partition(rq);
return 0;
}
static void mlx5e_free_wqe_alloc_info(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.frags);
kvfree(rq->wqe.alloc_units);
}
static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
{
struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
mlx5e_reporter_rq_cqe_err(rq);
}
static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
rq->wqe_overflow.page = alloc_page(GFP_KERNEL);
if (!rq->wqe_overflow.page)
return -ENOMEM;
rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0,
PAGE_SIZE, rq->buff.map_dir);
if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) {
__free_page(rq->wqe_overflow.page);
return -ENOMEM;
}
return 0;
}
static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
{
dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE,
rq->buff.map_dir);
__free_page(rq->wqe_overflow.page);
}
static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
u32 xdp_frag_size, struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = c->mdev;
int err;
rq->wq_type = params->rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->priv = c->priv;
rq->tstamp = c->tstamp;
rq->clock = &mdev->clock;
rq->icosq = &c->icosq;
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
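/* The FCS is only delivered (and thus counted in the HW MTU) when
* scatter_fcs is enabled.
*/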
rq->hw_mtu =
MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
err = mlx5e_rq_set_handlers(rq, params, NULL);
if (err)
return err;
return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
xdp_frag_size);
}
static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq,
u32 *pool_size,
int node)
{
void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
int wq_size;
int err;
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return 0;
err = mlx5e_rq_shampo_hd_alloc(rq, node);
if (err)
goto out;
rq->mpwqe.shampo->hd_per_wq =
mlx5e_shampo_hd_per_wq(mdev, params, rqp);
err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
if (err)
goto err_shampo_hd;
err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
if (err)
goto err_shampo_info;
rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
if (!rq->hw_gro_data) {
err = -ENOMEM;
goto err_hw_gro_data;
}
rq->mpwqe.shampo->key =
cpu_to_be32(rq->mpwqe.shampo->mkey);
rq->mpwqe.shampo->hd_per_wqe =
mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
*pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
return 0;
err_hw_gro_data:
mlx5e_rq_shampo_hd_info_free(rq);
err_shampo_info:
mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
err_shampo_hd:
mlx5e_rq_shampo_hd_free(rq);
out:
return err;
}
static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
{
if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
return;
kvfree(rq->hw_gro_data);
mlx5e_rq_shampo_hd_info_free(rq);
mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
mlx5e_rq_shampo_hd_free(rq);
}
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
int node, struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
void *rqc = rqp->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 pool_size;
int wq_sz;
int err;
int i;
rqp->wq.db_numa_node = node;
INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
err = mlx5e_alloc_mpwqe_rq_drop_page(rq);
if (err)
goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
rq->mpwqe.page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
rq->mpwqe.umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
rq->mpwqe.pages_per_wqe =
mlx5e_mpwrq_pages_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode);
rq->mpwqe.umr_wqebbs =
mlx5e_mpwrq_umr_wqebbs(mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode);
rq->mpwqe.mtts_per_wqe =
mlx5e_mpwrq_mtts_per_wqe(mdev, rq->mpwqe.page_shift,
rq->mpwqe.umr_mode);
pool_size = rq->mpwqe.pages_per_wqe <<
mlx5e_mpwqe_get_log_rq_size(mdev, params, xsk);
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk) && params->xdp_prog)
pool_size *= 2; /* additional page per packet for the linear part */
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
if (err)
goto err_free_mpwqe_info;
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
goto err_rq_xdp_prog;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
rq->wqe.info = rqp->frags_info;
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
err = mlx5e_init_wqe_alloc_info(rq, node);
if (err)
goto err_rq_wq_destroy;
}
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
struct page_pool_params pp_params = { 0 };
pp_params.order = 0;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
pp_params.pool_size = pool_size;
pp_params.nid = node;
pp_params.dev = rq->pdev;
pp_params.napi = rq->cq.napi;
pp_params.dma_dir = rq->buff.map_dir;
pp_params.max_len = PAGE_SIZE;
/* page_pool can be used even when there is no rq->xdp_prog,
* given page_pool does not handle DMA mapping there is no
* required state to clear. And page_pool gracefully handle
* elevated refcnt.
*/
rq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
goto err_free_by_rq_type;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
goto err_destroy_page_pool;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5e_rx_wqe_ll *wqe =
mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
u64 dma_offset = mul_u32_u32(i, rq->mpwqe.mtts_per_wqe) <<
rq->mpwqe.page_shift;
u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
0 : rq->buff.headroom;
wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
wqe->data[0].lkey = rq->mpwqe.umr_mkey_be;
} else {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
int f;
for (f = 0; f < rq->wqe.info.num_frags; f++) {
u32 frag_size = rq->wqe.info.arr[f].frag_size |
MLX5_HW_START_PADDING;
wqe->data[f].byte_count = cpu_to_be32(frag_size);
wqe->data[f].lkey = rq->mkey_be;
}
/* check if num_frags is not a pow of two */
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
wqe->data[f].byte_count = 0;
wqe->data[f].lkey = params->terminate_lkey_be;
wqe->data[f].addr = 0;
}
}
}
INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
switch (params->rx_cq_moderation.cq_period_mode) {
case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
break;
case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
default:
rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
return 0;
err_destroy_page_pool:
page_pool_destroy(rq->page_pool);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
err_free_mpwqe_info:
kvfree(rq->mpwqe.info);
err_rq_mkey:
mlx5_core_destroy_mkey(mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
err_rq_wq_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
err_rq_xdp_prog:
if (params->xdp_prog)
bpf_prog_put(params->xdp_prog);
return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
struct bpf_prog *old_prog;
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
void *in;
void *rqc;
void *wq;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
sizeof(u64) * rq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
ts_format = mlx5_is_real_time_rq(mdev) ?
MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
memcpy(rqc, param->rqc, sizeof(param->rqc));
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
MLX5_SET(wq, wq, log_headers_buffer_entry_num,
order_base_2(rq->mpwqe.shampo->hd_per_wq));
MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
}
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
kvfree(in);
return err;
}
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
mlx5e_rqwq_reset(rq);
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
return err;
}
static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
{
struct mlx5_cqwq *cqwq = &rq->cq.wq;
struct mlx5_cqe64 *cqe;
if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)))
mlx5_cqwq_pop(cqwq);
} else {
while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
mlx5_cqwq_pop(cqwq);
}
mlx5_cqwq_update_db_record(cqwq);
}
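/* Flush an RQ in place: move it back to RST, drop any posted RX descriptors
 * and pending CQEs, then return it to RDY.
 */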
int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
{
struct net_device *dev = rq->netdev;
int err;
err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
if (err) {
netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
return err;
}
mlx5e_free_rx_descs(rq);
mlx5e_flush_rq_cq(rq);
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err) {
netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
return err;
}
return 0;
}
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
MLX5_SET(rqc, rqc, vsd, vsd);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
err = mlx5_core_modify_rq(mdev, rq->rqn, in);
kvfree(in);
return err;
}
void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
{
unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
do {
if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes)
return 0;
msleep(20);
} while (time_before(jiffies, exp_time));
netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT;
}
void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq;
u16 head;
int i;
if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return;
wq = &rq->mpwqe.wq;
head = wq->head;
/* Release WQEs that are in missing state: they have been
* popped from the list after completion but were not freed
* due to deferred release.
* Also free the linked-list reserved entry, hence the "+ 1".
*/
for (i = 0; i < mlx5_wq_ll_missing(wq) + 1; i++) {
rq->dealloc_wqe(rq, head);
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
u16 len;
len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
(rq->mpwqe.shampo->hd_per_wq - 1);
mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
}
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
}
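/* Release every outstanding RX descriptor, for both the linked-list
 * (striding) and cyclic WQ layouts, including slots held back by deferred
 * page release.
 */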
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
u16 wqe_ix;
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
mlx5e_free_rx_missing_descs(rq);
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
wqe_ix_be = *wq->tail_next;
wqe_ix = be16_to_cpu(wqe_ix_be);
wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
0, true);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
u16 head = mlx5_wq_cyc_get_head(wq);
while (!mlx5_wq_cyc_is_empty(wq)) {
wqe_ix = mlx5_wq_cyc_get_tail(wq);
rq->dealloc_wqe(rq, wqe_ix);
mlx5_wq_cyc_pop(wq);
}
/* Missing slots might also contain unreleased pages due to
* deferred release.
*/
while (missing--) {
wqe_ix = mlx5_wq_cyc_ctr2ix(wq, head++);
rq->dealloc_wqe(rq, wqe_ix);
}
}
}
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = rq->mdev;
int err;
if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
__set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
err = mlx5e_create_rq(rq, param);
if (err)
goto err_free_rq;
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_destroy_rq;
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
/* We disable csum_complete when XDP is enabled, since XDP programs
 * might manipulate packets in a way that renders skb->checksum
 * incorrect.
 */
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state);
/* For CQE compression on striding RQ, use stride index provided by
* HW if capability is supported.
*/
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
/* For enhanced CQE compression packet processing: decompress the
 * session according to the enhanced layout.
 */
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
__set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
return 0;
err_destroy_rq:
mlx5e_destroy_rq(rq);
err_free_rq:
mlx5e_free_rq(rq);
return err;
}
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
kvfree(sq->db.xdpi_fifo.xi);
kvfree(sq->db.wqe_info);
}
static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int entries;
size_t size;
/* Upper bound on the number of entries across all xmit modes. */
entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS *
MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO);
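/* Rounding up to a power of two lets the FIFO be indexed with the simple
 * mask set below.
 */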
size = array_size(sizeof(*xdpi_fifo->xi), entries);
xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
xdpi_fifo->pc = &sq->xdpi_fifo_pc;
xdpi_fifo->cc = &sq->xdpi_fifo_cc;
xdpi_fifo->mask = entries - 1;
return 0;
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
size_t size;
int err;
size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
err = mlx5e_alloc_xdpsq_fifo(sq, numa);
if (err) {
mlx5e_free_xdpsq_db(sq);
return err;
}
return 0;
}
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_sq_param *param,
struct mlx5e_xdpsq *sq,
bool is_redirect)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
sq->xsk_pool = xsk_pool;
sq->stats = sq->xsk_pool ?
&c->priv->channel_stats[c->ix]->xsksq :
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
sq->stop_room = param->is_mpw ? mlx5e_stop_room_for_mpwqe(mdev) :
mlx5e_stop_room_for_max_wqe(mdev);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
return err;
}
static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
mlx5e_free_xdpsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
kvfree(sq->db.wqe_info);
}
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
size_t size;
size = array_size(wq_sz, sizeof(*sq->db.wqe_info));
sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
return 0;
}
static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
{
struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
recover_work);
mlx5e_reporter_icosq_cqe_err(sq);
}
static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
{
struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
recover_work);
/* Not implemented yet. */
netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
}
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
struct mlx5e_icosq *sq,
work_func_t recover_work_func)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
INIT_WORK(&sq->recover_work, recover_work_func);
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
return err;
}
static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
mlx5e_free_icosq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
kvfree(sq->db.wqe_info);
kvfree(sq->db.skb_fifo.fifo);
kvfree(sq->db.dma_fifo);
}
int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
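/* df_sz covers the worst case of MLX5_SEND_WQEBB_NUM_DS data segments per
 * WQEBB; both the DMA and SKB fifos are sized to it.
 */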
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa);
sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.skb_fifo.fifo)),
GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa);
if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
sq->db.skb_fifo.pc = &sq->skb_fifo_pc;
sq->db.skb_fifo.cc = &sq->skb_fifo_cc;
sq->db.skb_fifo.mask = df_sz - 1;
return 0;
}
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
struct mlx5e_params *params,
struct mlx5e_sq_param *param,
struct mlx5e_txqsq *sq,
int tc)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_wq_cyc *wq = &sq->wq;
int err;
sq->pdev = c->pdev;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
sq->channel = c;
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq->max_sq_mpw_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (mlx5_ipsec_device_caps(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
if (err)
return err;
wq->db = &wq->db[MLX5_SND_DBR];
err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
return err;
}
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
u32 *sqn)
{
u8 ts_format;
void *in;
void *sqc;
void *wq;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * csp->wq_ctrl->buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
ts_format = mlx5_is_real_time_sq(mdev) ?
MLX5_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
memcpy(sqc, param->sqc, sizeof(param->sqc));
MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz);
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn);
MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
MLX5_SET(sqc, sqc, ts_format, ts_format);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index);
MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_sq(mdev, in, inlen, sqn);
kvfree(in);
return err;
}
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p)
{
u64 bitmask = 0;
void *in;
void *sqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
MLX5_SET(sqc, sqc, state, p->next_state);
if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
bitmask |= 1;
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
}
if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
bitmask |= 1 << 2;
MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
}
MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
err = mlx5_core_modify_sq(mdev, sqn, in);
kvfree(in);
return err;
}
static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
mlx5_core_destroy_sq(mdev, sqn);
}
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
u16 qos_queue_group_id,
u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
int err;
err = mlx5e_create_sq(mdev, param, csp, sqn);
if (err)
return err;
msp.curr_state = MLX5_SQC_STATE_RST;
msp.next_state = MLX5_SQC_STATE_RDY;
if (qos_queue_group_id) {
msp.qos_update = true;
msp.qos_queue_group_id = qos_queue_group_id;
}
err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
mlx5e_destroy_sq(mdev, *sqn);
return err;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
int err;
err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
if (err)
return err;
sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
tx_rate = c->priv->tx_rates[sq->txq_ix];
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
if (params->tx_dim_enabled)
sq->state |= BIT(MLX5E_SQ_STATE_DIM);
return 0;
err_free_txqsq:
mlx5e_free_txqsq(sq);
return err;
}
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
}
void mlx5e_tx_disable_queue(struct netdev_queue *txq)
{
__netif_tx_lock_bh(txq);
netif_tx_stop_queue(txq);
__netif_tx_unlock_bh(txq);
}
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
mlx5e_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
struct mlx5e_tx_wqe *nop;
sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) {
.num_wqebbs = 1,
};
nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
}
}
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
cancel_work_sync(&sq->dim.work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
rl.rate = sq->rate_limit;
mlx5_rl_remove_rate(mdev, &rl);
}
mlx5e_free_txqsq_descs(sq);
mlx5e_free_txqsq(sq);
}
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
{
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
mlx5e_reporter_tx_err_cqe(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
work_func_t recover_work_func)
{
struct mlx5e_create_sq_param csp = {};
int err;
err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
if (err)
return err;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
if (param->is_tls) {
sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list();
if (IS_ERR(sq->ktls_resync)) {
err = PTR_ERR(sq->ktls_resync);
goto err_destroy_icosq;
}
}
return 0;
err_destroy_icosq:
mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_icosq:
mlx5e_free_icosq(sq);
return err;
}
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
}
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
synchronize_net(); /* Sync with NAPI. */
}
static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
if (sq->ktls_resync)
mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync);
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq_descs(sq);
mlx5e_free_icosq(sq);
}
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect)
{
struct mlx5e_create_sq_param csp = {};
int err;
err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
if (err)
return err;
csp.tis_lst_sz = 1;
csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
if (param->is_xdp_mb)
set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
mlx5e_set_xmit_fp(sq, param->is_mpw);
if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1;
unsigned int inline_hdr_sz = 0;
int i;
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
ds_cnt++;
}
/* Pre-initialize the fixed WQE fields. */
for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) {
.num_wqebbs = 1,
.num_pkts = 1,
};
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
}
}
return 0;
err_free_xdpsq:
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
mlx5e_free_xdpsq(sq);
return err;
}
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
struct mlx5e_channel *c = sq->channel;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
synchronize_net(); /* Sync with NAPI. */
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_xdpsq_descs(sq);
mlx5e_free_xdpsq(sq);
}
static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int err;
u32 i;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
*mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe->op_own = 0xf1;
cqe->validity_iteration_count = 0xff;
}
cq->mdev = mdev;
cq->netdev = priv->netdev;
cq->priv = priv;
return 0;
}
static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
int err;
param->wq.buf_numa_node = ccp->node;
param->wq.db_numa_node = ccp->node;
param->eq_ix = ccp->ix;
err = mlx5e_alloc_cq_common(priv, param, cq);
cq->napi = ccp->napi;
cq->ch_stats = ccp->ch_stats;
return err;
}
static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
struct mlx5_core_dev *mdev = cq->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
void *in;
void *cqc;
int inlen;
int eqn;
int err;
err = mlx5_comp_eqn_get(mdev, param->eq_ix, &eqn);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
memcpy(cqc, param->cqc, sizeof(param->cqc));
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
kvfree(in);
if (err)
return err;
mlx5e_cq_arm(cq);
return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}
int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
err = mlx5e_alloc_cq(priv, param, ccp, cq);
if (err)
return err;
err = mlx5e_create_cq(cq, param);
if (err)
goto err_free_cq;
if (MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
return 0;
err_free_cq:
mlx5e_free_cq(cq);
return err;
}
void mlx5e_close_cq(struct mlx5e_cq *cq)
{
mlx5e_destroy_cq(cq);
mlx5e_free_cq(cq);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
struct mlx5e_channel_param *cparam)
{
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
ccp, &c->sq[tc].cq);
if (err)
goto err_close_tx_cqs;
}
return 0;
err_close_tx_cqs:
for (tc--; tc >= 0; tc--)
mlx5e_close_cq(&c->sq[tc].cq);
return err;
}
static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
int tc;
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
{
int tc;
for (tc = 0; tc < TC_MAX_QUEUE; tc++)
if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
return tc;
WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
return -ENOENT;
}
static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
u32 *hw_id)
{
int tc;
if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL) {
*hw_id = 0;
return 0;
}
tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
if (tc < 0)
return tc;
if (tc >= params->mqprio.num_tc) {
WARN(1, "Unexpected TCs configuration. tc %d is out of range of %u",
tc, params->mqprio.num_tc);
return -EINVAL;
}
*hw_id = params->mqprio.channel.hw_id[tc];
return 0;
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
int err, tc;
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
u32 qos_queue_group_id;
err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
if (err)
goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->txq_sq, &c->sq[tc], tc,
qos_queue_group_id,
&c->priv->channel_stats[c->ix]->sq[tc]);
if (err)
goto err_close_sqs;
}
return 0;
err_close_sqs:
for (tc--; tc >= 0; tc--)
mlx5e_close_txqsq(&c->sq[tc]);
return err;
}
static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
int tc;
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_modify_sq_param msp = {0};
struct mlx5_rate_limit rl = {0};
u16 rl_index = 0;
int err;
if (rate == sq->rate_limit)
/* nothing to do */
return 0;
if (sq->rate_limit) {
rl.rate = sq->rate_limit;
/* Remove the current rate-limit entry to free its slot for the next one. */
mlx5_rl_remove_rate(mdev, &rl);
}
sq->rate_limit = 0;
if (rate) {
rl.rate = rate;
err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
if (err) {
netdev_err(dev, "Failed configuring rate %u: %d\n",
rate, err);
return err;
}
}
msp.curr_state = MLX5_SQC_STATE_RDY;
msp.next_state = MLX5_SQC_STATE_RDY;
msp.rl_index = rl_index;
msp.rl_update = true;
err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
if (err) {
netdev_err(dev, "Failed configuring rate %u: %d\n",
rate, err);
/* remove the rate from the table */
if (rate)
mlx5_rl_remove_rate(mdev, &rl);
return err;
}
sq->rate_limit = rate;
return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_txqsq *sq = priv->txq2sq[index];
int err = 0;
if (!mlx5_rl_is_supported(mdev)) {
netdev_err(dev, "Rate limiting is not supported on this device\n");
return -EINVAL;
}
/* rate is given in Mb/sec, HW config is in Kb/sec */
rate = rate << 10;
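/* e.g. a requested 100 Mb/s becomes 100 << 10 = 102400 Kb/s (binary kilo). */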
/* Check that the rate is in the valid range; 0 is always valid. */
if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
netdev_err(dev, "TX rate %u, is not in range\n", rate);
return -ERANGE;
}
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
err = mlx5e_set_sq_maxrate(dev, sq, rate);
if (!err)
priv->tx_rates[index] = rate;
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_params)
{
int err;
err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
if (err)
return err;
return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq);
}
static int mlx5e_open_queues(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
struct dim_cq_moder icocq_moder = {0, 0};
struct mlx5e_create_cq_param ccp;
int err;
mlx5e_build_create_cq_param(&ccp, c);
err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
if (err)
goto err_close_icosq_cq;
err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
&c->xdpsq.cq);
if (err)
goto err_close_tx_cqs;
err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
&c->rq.cq);
if (err)
goto err_close_xdp_tx_cqs;
err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
&ccp, &c->rq_xdpsq.cq) : 0;
if (err)
goto err_close_rx_cq;
spin_lock_init(&c->async_icosq_lock);
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
mlx5e_async_icosq_err_cqe_work);
if (err)
goto err_close_xdpsq_cq;
mutex_init(&c->icosq_recovery_lock);
err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
mlx5e_icosq_err_cqe_work);
if (err)
goto err_close_async_icosq;
err = mlx5e_open_sqs(c, params, cparam);
if (err)
goto err_close_icosq;
err = mlx5e_open_rxq_rq(c, params, &cparam->rq);
if (err)
goto err_close_sqs;
if (c->xdp) {
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL,
&c->rq_xdpsq, false);
if (err)
goto err_close_rq;
}
err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true);
if (err)
goto err_close_xdp_sq;
return 0;
err_close_xdp_sq:
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
err_close_rq:
mlx5e_close_rq(&c->rq);
err_close_sqs:
mlx5e_close_sqs(c);
err_close_icosq:
mlx5e_close_icosq(&c->icosq);
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
err_close_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
err_close_rx_cq:
mlx5e_close_cq(&c->rq.cq);
err_close_xdp_tx_cqs:
mlx5e_close_cq(&c->xdpsq.cq);
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
err_close_icosq_cq:
mlx5e_close_cq(&c->icosq.cq);
err_close_async_icosq_cq:
mlx5e_close_cq(&c->async_icosq.cq);
return err;
}
static void mlx5e_close_queues(struct mlx5e_channel *c)
{
mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
/* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
cancel_work_sync(&c->icosq.recover_work);
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mutex_destroy(&c->icosq_recovery_lock);
mlx5e_close_icosq(&c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_cq(&c->xdpsq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
mlx5e_close_cq(&c->async_icosq.cq);
}
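/* Spread channels round-robin across the LAG ports; non-PF functions start
 * from an offset derived from their vhca_id.
 */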
static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
{
u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
}
static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
{
if (ix > priv->stats_nch) {
netdev_warn(priv->netdev, "Unexpected channel stats index %d > %d\n", ix,
priv->stats_nch);
return -EINVAL;
}
if (priv->channel_stats[ix])
return 0;
/* Asymmetric dynamic memory allocation.
* Freed in mlx5e_priv_arrays_free, not on channel closure.
*/
netdev_dbg(priv->netdev, "Creating channel stats %d\n", ix);
priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
GFP_KERNEL, cpu_to_node(cpu));
if (!priv->channel_stats[ix])
return -ENOMEM;
priv->stats_nch++;
return 0;
}
void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c)
{
spin_lock_bh(&c->async_icosq_lock);
mlx5e_trigger_irq(&c->async_icosq);
spin_unlock_bh(&c->async_icosq_lock);
}
void mlx5e_trigger_napi_sched(struct napi_struct *napi)
{
local_bh_disable();
napi_schedule(napi);
local_bh_enable();
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
int cpu = mlx5_comp_vector_get_cpu(priv->mdev, ix);
struct net_device *netdev = priv->netdev;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
unsigned int irq;
int err;
err = mlx5_comp_irqn_get(priv->mdev, ix, &irq);
if (err)
return err;
err = mlx5e_channel_stats_alloc(priv, ix, cpu);
if (err)
return err;
c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
c->priv = priv;
c->mdev = priv->mdev;
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix]->ch;
c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll);
err = mlx5e_open_queues(c, params, cparam);
if (unlikely(err))
goto err_napi_del;
if (xsk_pool) {
mlx5e_build_xsk_param(xsk_pool, &xsk);
err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
if (unlikely(err))
goto err_close_queues;
}
*cp = c;
return 0;
err_close_queues:
mlx5e_close_queues(c);
err_napi_del:
netif_napi_del(&c->napi);
kvfree(c);
return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
napi_enable(&c->napi);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_icosq(&c->async_icosq);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
int tc;
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_deactivate_xsk(c);
else
mlx5e_deactivate_rq(&c->rq);
mlx5e_deactivate_icosq(&c->async_icosq);
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
mlx5e_qos_deactivate_queues(c);
napi_disable(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
mlx5e_qos_close_queues(c);
netif_napi_del(&c->napi);
kvfree(c);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
struct mlx5e_channel_param *cparam;
int err = -ENOMEM;
int i;
chs->num = chs->params.num_channels;
chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
if (!chs->c || !cparam)
goto err_free;
err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
if (err)
goto err_free;
for (i = 0; i < chs->num; i++) {
struct xsk_buff_pool *xsk_pool = NULL;
if (chs->params.xdp_prog)
xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
if (err)
goto err_close_channels;
}
if (priv->htb) {
err = mlx5e_qos_open_queues(priv, chs);
if (err)
goto err_close_ptp;
}
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
err_close_ptp:
if (chs->ptp)
mlx5e_ptp_close(chs->ptp);
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
err_free:
kfree(chs->c);
kvfree(cparam);
chs->num = 0;
return err;
}
static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int i;
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
if (priv->htb)
mlx5e_qos_activate_queues(priv);
for (i = 0; i < chs->num; i++)
mlx5e_trigger_napi_icosq(chs->c[i]);
if (chs->ptp)
mlx5e_ptp_activate_channel(chs->ptp);
}
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
int i;
for (i = 0; i < chs->num; i++) {
int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
struct mlx5e_channel *c = chs->c[i];
/* Don't wait on the XSK RQ, because the newer xdpsock sample
 * doesn't provide any Fill Ring entries at the setup stage.
 */
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
continue;
err |= mlx5e_wait_for_min_rx_wqes(&c->rq, timeout);
}
return err ? -ETIMEDOUT : 0;
}
static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
int i;
if (chs->ptp)
mlx5e_ptp_deactivate_channel(chs->ptp);
for (i = 0; i < chs->num; i++)
mlx5e_deactivate_channel(chs->c[i]);
}
void mlx5e_close_channels(struct mlx5e_channels *chs)
{
int i;
if (chs->ptp) {
mlx5e_ptp_close(chs->ptp);
chs->ptp = NULL;
}
for (i = 0; i < chs->num; i++)
mlx5e_close_channel(chs->c[i]);
kfree(chs->c);
chs->num = 0;
}
static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu);
int err;
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
if (err)
return err;
/* Update vport context MTU */
mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
return 0;
}
static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 *mtu)
{
u16 hw_mtu = 0;
int err;
err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
if (err || !hw_mtu) /* fallback to port oper mtu */
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
u16 mtu;
int err;
err = mlx5e_set_mtu(mdev, params, params->sw_mtu);
if (err)
return err;
mlx5e_query_mtu(mdev, params, &mtu);
if (mtu != params->sw_mtu)
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
__func__, mtu, params->sw_mtu);
params->sw_mtu = mtu;
return 0;
}
MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
/* MTU range: 68 - hw-specific max */
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
ETH_MAX_MTU);
}
static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc,
struct netdev_tc_txq *tc_to_txq)
{
int tc, err;
netdev_reset_tc(netdev);
if (ntc == 1)
return 0;
err = netdev_set_num_tc(netdev, ntc);
if (err) {
netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", err, ntc);
return err;
}
for (tc = 0; tc < ntc; tc++) {
u16 count, offset;
count = tc_to_txq[tc].count;
offset = tc_to_txq[tc].offset;
netdev_set_tc_queue(netdev, tc, count, offset);
}
return 0;
}
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
{
int nch, ntc, num_txqs, err;
int qos_queues = 0;
if (priv->htb)
qos_queues = mlx5e_htb_cur_leaf_nodes(priv->htb);
nch = priv->channels.params.num_channels;
ntc = mlx5e_get_dcb_num_tc(&priv->channels.params);
num_txqs = nch * ntc + qos_queues;
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
num_txqs += ntc;
netdev_dbg(priv->netdev, "Setting num_txqs %d\n", num_txqs);
err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
if (err)
netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
return err;
}
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
int old_num_txqs, old_ntc;
int nch, ntc;
int err;
int i;
old_num_txqs = netdev->real_num_tx_queues;
old_ntc = netdev->num_tc ? : 1;
for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++)
old_tc_to_txq[i] = netdev->tc_to_txq[i];
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.mqprio.num_tc;
tc_to_txq = priv->channels.params.mqprio.tc_to_txq;
err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq);
if (err)
goto err_out;
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
err = netif_set_real_num_rx_queues(netdev, nch);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
return 0;
err_txqs:
/* netif_set_real_num_rx_queues could fail only when nch increased. Only
* one of nch and ntc is changed in this function. That means, the call
* to netif_set_real_num_tx_queues below should not fail, because it
* decreases the number of TX queues.
*/
WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
err_tcs:
WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
old_tc_to_txq));
err_out:
return err;
}
static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
struct mlx5_core_dev *mdev = priv->mdev;
int num_comp_vectors, ix, irq;
num_comp_vectors = mlx5_comp_vectors_max(mdev);
for (ix = 0; ix < params->num_channels; ix++) {
cpumask_clear(priv->scratchpad.cpumask);
for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
}
netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
}
}
static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
int err;
err = mlx5e_update_netdev_queues(priv);
if (err)
return err;
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
/* This function may be called on attach, before priv->rx_res is created. */
if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
return 0;
}
MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed);
static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
{
int i, ch, tc, num_tc;
ch = priv->channels.num;
num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
for (i = 0; i < ch; i++) {
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
}
}
if (!priv->channels.ptp)
goto out;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
goto out;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
}
out:
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
* which pairs with this barrier.
*/
smp_wmb();
}
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(priv, &priv->channels);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
* OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
* Make it happy to avoid TX timeout false alarms.
*/
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_activate_channels(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
if (priv->rx_res)
mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
if (priv->rx_res)
mlx5e_rx_res_channels_deactivate(priv->rx_res);
if (mlx5e_is_vport_rep(priv))
mlx5e_rep_deactivate_channels(priv);
/* The results of ndo_select_queue are unreliable while the netdev config
 * (real_num_tx_queues, num_tc) is being changed. Stop all queues to
 * prevent ndo_start_xmit from being called, so that it can assume that
 * the selected queue is always valid.
 */
netif_tx_disable(priv->netdev);
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
static int mlx5e_switch_priv_params(struct mlx5e_priv *priv,
struct mlx5e_params *new_params,
mlx5e_fp_preactivate preactivate,
void *context)
{
struct mlx5e_params old_params;
old_params = priv->channels.params;
priv->channels.params = *new_params;
if (preactivate) {
int err;
err = preactivate(priv, context);
if (err) {
priv->channels.params = old_params;
return err;
}
}
return 0;
}
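/* Swap in an already-opened channel set: take the carrier down, deactivate
 * the old channels, install the new ones, run the preactivate hook (rolling
 * back on failure), then reactivate and restore the carrier.
 */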
static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context)
{
struct net_device *netdev = priv->netdev;
struct mlx5e_channels old_chs;
int carrier_ok;
int err = 0;
carrier_ok = netif_carrier_ok(netdev);
netif_carrier_off(netdev);
mlx5e_deactivate_priv_channels(priv);
old_chs = priv->channels;
priv->channels = *new_chs;
/* The new channels are ready to roll; call the preactivate hook if needed
 * to modify HW settings or update kernel parameters.
 */
if (preactivate) {
err = preactivate(priv, context);
if (err) {
priv->channels = old_chs;
goto out;
}
}
mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
mlx5e_selq_apply(&priv->selq);
out:
mlx5e_activate_priv_channels(priv);
/* return carrier back if needed */
if (carrier_ok)
netif_carrier_on(netdev);
return err;
}
int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
struct mlx5e_params *params,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
struct mlx5e_channels *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
if (!new_chs)
return -ENOMEM;
new_chs->params = *params;
mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
err = mlx5e_open_channels(priv, new_chs);
if (err)
goto err_cancel_selq;
err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
if (err)
goto err_close;
kfree(new_chs);
return 0;
err_close:
mlx5e_close_channels(new_chs);
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
kfree(new_chs);
return err;
}
int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
{
return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true);
}
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
enum mlx5_port_status state)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int vport_admin_state;
mlx5_set_port_admin_status(mdev, state);
if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
else
vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mlx5e_selq_prepare_params(&priv->selq, &priv->channels.params);
set_bit(MLX5E_STATE_OPENED, &priv->state);
err = mlx5e_open_channels(priv, &priv->channels);
if (err)
goto err_clear_state_opened_flag;
err = priv->profile->update_rx(priv);
if (err)
goto err_close_channels;
mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
mlx5e_queue_update_stats(priv);
return 0;
err_close_channels:
mlx5e_close_channels(&priv->channels);
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_selq_cancel(&priv->selq);
return err;
}
int mlx5e_open(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
/* May already be CLOSED if a previous configuration operation that
 * involves close & open (e.g. an RX/TX queue size change) failed.
 */
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
mlx5e_apply_traps(priv, false);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
return 0;
}
int mlx5e_close(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
if (!netif_device_present(netdev))
return -ENODEV;
mutex_lock(&priv->state_lock);
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
return err;
}
static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
{
mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
int err;
param->wq.db_numa_node = param->wq.buf_numa_node;
err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;
/* Mark as unused given "Drop-RQ" packets never reach XDP */
xdp_rxq_info_unused(&rq->xdp_rxq);
rq->mdev = mdev;
return 0;
}
static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(priv, param, cq);
}
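/* The drop RQ posts no buffers; it only gives RX steering a valid
 * destination while the real channels are closed, and its queue counter
 * accounts those packets (see rx_if_down_packets).
 */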
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_cq_param cq_param = {};
struct mlx5e_rq_param rq_param = {};
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
mlx5e_build_drop_rq_param(mdev, priv->drop_rq_q_counter, &rq_param);
err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
if (err)
return err;
err = mlx5e_create_cq(cq, &cq_param);
if (err)
goto err_free_cq;
err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
if (err)
goto err_destroy_cq;
err = mlx5e_create_rq(drop_rq, &rq_param);
if (err)
goto err_free_rq;
err = mlx5e_modify_rq_state(drop_rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
mlx5_core_warn(priv->mdev, "modify_rq_state failed, rx_if_down_packets won't be counted %d\n", err);
return 0;
err_free_rq:
mlx5e_free_drop_rq(drop_rq);
err_destroy_cq:
mlx5e_destroy_cq(cq);
err_free_cq:
mlx5e_free_cq(cq);
return err;
}
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
mlx5e_destroy_rq(drop_rq);
mlx5e_free_drop_rq(drop_rq);
mlx5e_destroy_cq(&drop_rq->cq);
mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn)
{
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
if (MLX5_GET(tisc, tisc, tls_en))
MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
if (mlx5_lag_is_lacp_owner(mdev))
MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
return mlx5_core_create_tis(mdev, in, tisn);
}
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
mlx5_core_destroy_tis(mdev, tisn);
}
void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
int tc, i;
for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
}
static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
}
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
int tc, i;
int err;
for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
for (tc = 0; tc < priv->profile->max_tc; tc++) {
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
void *tisc;
tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, tc << 1);
if (mlx5e_lag_should_assign_affinity(priv->mdev))
MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
if (err)
goto err_close_tises;
}
}
return 0;
err_close_tises:
for (; i >= 0; i--) {
for (tc--; tc >= 0; tc--)
mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
tc = priv->profile->max_tc;
}
return err;
}
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
if (priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
mlx5e_mqprio_rl_free(priv->mqprio_rl);
priv->mqprio_rl = NULL;
}
mlx5e_accel_cleanup_tx(priv);
mlx5e_destroy_tises(priv);
}
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
int i;
for (i = 0; i < chs->num; i++) {
err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
if (err)
return err;
}
if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state))
return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd);
return 0;
}
static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
int ntc, int nch)
{
int tc;
memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE);
/* Map all netdev TCs to offset 0.
 * We have our own UP-to-TXQ mapping for the DCB QoS mode.
 */
for (tc = 0; tc < ntc; tc++) {
tc_to_txq[tc] = (struct netdev_tc_txq) {
.count = nch,
.offset = 0,
};
}
}
static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq,
struct tc_mqprio_qopt *qopt)
{
int tc;
for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
tc_to_txq[tc] = (struct netdev_tc_txq) {
.count = qopt->count[tc],
.offset = qopt->offset[tc],
};
}
}
static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_mqprio_rl_update_params(struct mlx5e_params *params,
struct mlx5e_mqprio_rl *rl)
{
int tc;
for (tc = 0; tc < TC_MAX_QUEUE; tc++) {
u32 hw_id = 0;
if (rl)
mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id);
params->mqprio.channel.hw_id[tc] = hw_id;
}
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
struct tc_mqprio_qopt_offload *mqprio,
struct mlx5e_mqprio_rl *rl)
{
int tc;
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = mqprio->qopt.num_tc;
for (tc = 0; tc < TC_MAX_QUEUE; tc++)
params->mqprio.channel.max_rate[tc] = mqprio->max_rate[tc];
mlx5e_mqprio_rl_update_params(params, rl);
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, &mqprio->qopt);
}
static void mlx5e_params_mqprio_reset(struct mlx5e_params *params)
{
mlx5e_params_mqprio_dcb_set(params, 1);
}
static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
struct tc_mqprio_qopt *mqprio)
{
struct mlx5e_params new_params;
u8 tc = mqprio->num_tc;
int err;
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
if (tc && tc != MLX5E_MAX_NUM_TC)
return -EINVAL;
new_params = priv->channels.params;
mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (!err && priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
mlx5e_mqprio_rl_free(priv->mqprio_rl);
priv->mqprio_rl = NULL;
}
priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
mlx5e_get_dcb_num_tc(&priv->channels.params));
return err;
}
static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
struct net_device *netdev = priv->netdev;
struct mlx5e_ptp *ptp_channel;
int agg_count = 0;
int i;
ptp_channel = priv->channels.ptp;
if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) {
netdev_err(netdev,
"Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n");
return -EINVAL;
}
if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 ||
mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC)
return -EINVAL;
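/* Queue groups must be contiguous (offset[i] equals the sum of the previous
 * counts) and together must cover exactly all channels.
 */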
for (i = 0; i < mqprio->qopt.num_tc; i++) {
if (!mqprio->qopt.count[i]) {
netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i);
return -EINVAL;
}
if (mqprio->min_rate[i]) {
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
if (mqprio->max_rate[i]) {
int err;
err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
if (err)
return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
netdev_err(netdev, "Discontinuous queues config is not supported\n");
return -EINVAL;
}
agg_count += mqprio->qopt.count[i];
}
if (priv->channels.params.num_channels != agg_count) {
netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n",
agg_count, priv->channels.params.num_channels);
return -EINVAL;
}
return 0;
}
static bool mlx5e_mqprio_rate_limit(u8 num_tc, u64 max_rate[])
{
int tc;
for (tc = 0; tc < num_tc; tc++)
if (max_rate[tc])
return true;
return false;
}
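/* Create a per-TC rate limiter for channel-mode mqprio. Returns NULL when no
 * TC has a max rate configured, or an ERR_PTR on allocation/init failure.
 */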
static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev,
u8 num_tc, u64 max_rate[])
{
struct mlx5e_mqprio_rl *rl;
int err;
if (!mlx5e_mqprio_rate_limit(num_tc, max_rate))
return NULL;
rl = mlx5e_mqprio_rl_alloc();
if (!rl)
return ERR_PTR(-ENOMEM);
err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
if (err) {
mlx5e_mqprio_rl_free(rl);
return ERR_PTR(err);
}
return rl;
}
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
if (err)
return err;
rl = mlx5e_mqprio_rl_create(priv->mdev, mqprio->qopt.num_tc, mqprio->max_rate);
if (IS_ERR(rl))
return PTR_ERR(rl);
new_params = priv->channels.params;
mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
if (err) {
if (rl) {
mlx5e_mqprio_rl_cleanup(rl);
mlx5e_mqprio_rl_free(rl);
}
return err;
}
if (priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
mlx5e_mqprio_rl_free(priv->mqprio_rl);
}
priv->mqprio_rl = rl;
return 0;
}
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
	/* MQPRIO is another top-level qdisc that can't be attached
	 * simultaneously with the offloaded HTB.
	 */
if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq)))
return -EINVAL;
switch (mqprio->mode) {
case TC_MQPRIO_MODE_DCB:
return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt);
case TC_MQPRIO_MODE_CHANNEL:
return mlx5e_setup_tc_mqprio_channel(priv, mqprio);
default:
return -EOPNOTSUPP;
}
}
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool tc_unbind = false;
int err;
if (type == TC_SETUP_BLOCK &&
((struct flow_block_offload *)type_data)->command == FLOW_BLOCK_UNBIND)
tc_unbind = true;
if (!netif_device_present(dev) && !tc_unbind)
return -ENODEV;
switch (type) {
case TC_SETUP_BLOCK: {
struct flow_block_offload *f = type_data;
f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_block_cb_list,
mlx5e_setup_tc_block_cb,
priv, priv, true);
}
case TC_SETUP_QDISC_MQPRIO:
mutex_lock(&priv->state_lock);
err = mlx5e_setup_tc_mqprio(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
case TC_SETUP_QDISC_HTB:
mutex_lock(&priv->state_lock);
err = mlx5e_htb_setup_tc(priv, type_data);
mutex_unlock(&priv->state_lock);
return err;
default:
return -EOPNOTSUPP;
}
}
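/* Fold the software counters of all channels (regular and XSK RQs, per-TC SQs)
 * and the PTP queues into the rtnl_link_stats64 totals.
 */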
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
int i;
for (i = 0; i < priv->stats_nch; i++) {
struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
int j;
s->rx_packets += rq_stats->packets + xskrq_stats->packets;
s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tx_dropped += sq_stats->dropped;
}
}
if (priv->tx_ptp_opened) {
for (i = 0; i < priv->max_opened_tc; i++) {
struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[i];
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
s->tx_dropped += sq_stats->dropped;
}
}
if (priv->rx_ptp_opened) {
struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
s->multicast += rq_stats->mcast_packets;
}
}
void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
if (!netif_device_present(dev))
return;
	/* In switchdev mode, the monitor counters don't cover the 802_3
	 * rx/tx stats, so the update-stats mechanism must keep the 802_3
	 * layout counters up to date.
	 */
if (!mlx5e_monitor_counter_supported(priv) ||
mlx5e_is_uplink_rep(priv)) {
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
}
if (mlx5e_is_uplink_rep(priv)) {
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
/* vport multicast also counts packets that are dropped due to steering
* or rx out of buffer
*/
stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
} else {
mlx5e_fold_sw_stats64(priv, stats);
}
stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
PPORT_802_3_GET(pstats, a_frame_too_long_errors) +
VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small);
stats->rx_crc_errors =
PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
}
static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv)
{
if (mlx5e_is_uplink_rep(priv))
return; /* no rx mode for uplink rep */
queue_work(priv->wq, &priv->set_rx_mode_work);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_nic_set_rx_mode(priv);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct sockaddr *saddr = addr;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
return 0;
}
#define MLX5E_SET_FEATURE(features, feature, enable) \
do { \
if (enable) \
*features |= feature; \
else \
*features &= ~feature; \
} while (0)
typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params *cur_params;
struct mlx5e_params new_params;
bool reset = true;
int err = 0;
mutex_lock(&priv->state_lock);
cur_params = &priv->channels.params;
new_params = *cur_params;
if (enable)
new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
else
goto out;
if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
reset = false;
}
}
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
}
static int set_feature_hw_gro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params new_params;
bool reset = true;
int err = 0;
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
if (enable) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
new_params.packet_merge.shampo.match_criteria_type =
MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
new_params.packet_merge.shampo.alignment_granularity =
MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
} else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
} else {
goto out;
}
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
}
static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
mlx5e_enable_cvlan_filter(priv->fs,
!!(priv->netdev->flags & IFF_PROMISC));
else
mlx5e_disable_cvlan_filter(priv->fs,
!!(priv->netdev->flags & IFF_PROMISC));
return 0;
}
static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
int tc_flag = mlx5e_is_uplink_rep(priv) ? MLX5_TC_FLAG(ESW_OFFLOAD) :
MLX5_TC_FLAG(NIC_OFFLOAD);
if (!enable && mlx5e_tc_num_filters(priv, tc_flag)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
#endif
mutex_lock(&priv->state_lock);
if (!enable && mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
err = -EINVAL;
}
mutex_unlock(&priv->state_lock);
return err;
}
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_set_port_fcs(mdev, !enable);
}
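/* Toggle the rx_ts_over_crc mode through the PCMR ports_check register.
 * This is a no-op when the capability is missing or the requested state
 * already matches the current one.
 */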
static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
bool supported, curr_state;
int err;
if (!MLX5_CAP_GEN(mdev, ports_check))
return 0;
err = mlx5_query_ports_check(mdev, in, sizeof(in));
if (err)
return err;
supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap);
curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc);
if (!supported || enable == curr_state)
return 0;
MLX5_SET(pcmr_reg, in, local_port, 1);
MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable);
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
{
struct mlx5_core_dev *mdev = priv->mdev;
bool enable = *(bool *)ctx;
return mlx5e_set_rx_port_ts(mdev, enable);
}
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
new_params = chs->params;
new_params.scatter_fcs_en = enable;
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
&new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
mutex_lock(&priv->state_lock);
mlx5e_fs_set_vlan_strip_disable(priv->fs, !enable);
priv->channels.params.vlan_strip_disable = !enable;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
if (err) {
mlx5e_fs_set_vlan_strip_disable(priv->fs, enable);
priv->channels.params.vlan_strip_disable = enable;
}
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_flow_steering *fs = priv->fs;
if (mlx5e_is_uplink_rep(priv))
return 0; /* no vlan table for uplink rep */
return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_flow_steering *fs = priv->fs;
if (mlx5e_is_uplink_rep(priv))
return 0; /* no vlan table for uplink rep */
return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
}
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
if (enable)
err = mlx5e_arfs_enable(priv->fs);
else
err = mlx5e_arfs_disable(priv->fs);
return err;
}
#endif
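/* Run a single feature handler only if the corresponding bit actually
 * changed; on failure, revert the bit in *features and log the error.
 */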
static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t *features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
{
netdev_features_t changes = *features ^ netdev->features;
bool enable = !!(*features & feature);
int err;
if (!(changes & feature))
return 0;
err = feature_handler(netdev, enable);
if (err) {
MLX5E_SET_FEATURE(features, feature, !enable);
netdev_err(netdev, "%s feature %pNF failed, err %d\n",
enable ? "Enable" : "Disable", &feature, err);
return err;
}
return 0;
}
void mlx5e_set_xdp_feature(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params *params = &priv->channels.params;
xdp_features_t val;
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
xdp_clear_features_flag(netdev);
return;
}
val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
xdp_set_features_flag(netdev, val);
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t oper_features = features;
int err = 0;
#define MLX5E_HANDLE_FEATURE(feature, handler) \
mlx5e_handle_feature(netdev, &oper_features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
if (err) {
netdev->features = oper_features;
return -EINVAL;
}
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
return 0;
}
static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev,
netdev_features_t features)
{
features &= ~NETIF_F_HW_TLS_RX;
if (netdev->features & NETIF_F_HW_TLS_RX)
netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
features &= ~NETIF_F_HW_TLS_TX;
if (netdev->features & NETIF_F_HW_TLS_TX)
netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
features &= ~NETIF_F_NTUPLE;
if (netdev->features & NETIF_F_NTUPLE)
netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");
features &= ~NETIF_F_GRO_HW;
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
return features;
}
static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_vlan_table *vlan;
struct mlx5e_params *params;
if (!netif_device_present(netdev))
return features;
vlan = mlx5e_fs_get_vlan(priv->fs);
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
if (!vlan ||
!bitmap_empty(mlx5e_vlan_get_active_svlans(vlan), VLAN_N_VID)) {
		/* HW strips the outer C-tag header; this is a problem
		 * for S-tag traffic.
		 */
features &= ~NETIF_F_HW_VLAN_CTAG_RX;
if (!params->vlan_strip_disable)
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
features &= ~NETIF_F_GRO_HW;
}
}
if (params->xdp_prog) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with XDP\n");
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
features &= ~NETIF_F_GRO_HW;
}
}
if (priv->xsk.refcnt) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
features &= ~NETIF_F_LRO;
}
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
features &= ~NETIF_F_GRO_HW;
}
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
features &= ~NETIF_F_RXHASH;
if (netdev->features & NETIF_F_RXHASH)
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
if (features & NETIF_F_GRO_HW) {
netdev_warn(netdev, "Disabling HW-GRO, not supported when CQE compress is active\n");
features &= ~NETIF_F_GRO_HW;
}
}
if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
features |= NETIF_F_NETNS_LOCAL;
} else {
features &= ~NETIF_F_NETNS_LOCAL;
}
mutex_unlock(&priv->state_lock);
return features;
}
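/* Check the requested MTU against every active XSK pool: the HW MTU plus all
 * headrooms must fit in the XSK frame, and XDP_PASS SKBs must fit in a page.
 */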
static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
struct mlx5e_channels *chs,
struct mlx5e_params *new_params,
struct mlx5_core_dev *mdev)
{
u16 ix;
for (ix = 0; ix < chs->params.num_channels; ix++) {
struct xsk_buff_pool *xsk_pool =
mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
int max_xdp_mtu;
if (!xsk_pool)
continue;
mlx5e_build_xsk_param(xsk_pool, &xsk);
max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk);
/* Validate XSK params and XDP MTU in advance */
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) ||
new_params->sw_mtu > max_xdp_mtu) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
int max_mtu_frame, max_mtu_page, max_mtu;
/* Two criteria must be met:
* 1. HW MTU + all headrooms <= XSK frame size.
* 2. Size of SKBs allocated on XDP_PASS <= PAGE_SIZE.
*/
max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr);
max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0));
max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu);
netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}
}
return true;
}
static bool mlx5e_params_validate_xdp(struct net_device *netdev,
struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
bool is_linear;
/* No XSK params: AF_XDP can't be enabled yet at the point of setting
* the XDP program.
*/
is_linear = params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC ?
mlx5e_rx_is_linear_skb(mdev, params, NULL) :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL);
if (!is_linear) {
if (!params->xdp_prog->aux->xdp_has_frags) {
netdev_warn(netdev, "MTU(%d) > %d, too big for an XDP program not aware of multi buffer\n",
params->sw_mtu,
mlx5e_xdp_max_mtu(params, NULL));
return false;
}
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
!mlx5e_verify_params_rx_mpwqe_strides(mdev, params, NULL)) {
netdev_warn(netdev, "XDP is not allowed with striding RQ and MTU(%d) > %d\n",
params->sw_mtu,
mlx5e_xdp_max_mtu(params, NULL));
return false;
}
}
return true;
}
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params new_params;
struct mlx5e_params *params;
bool reset = true;
int err = 0;
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
new_params = *params;
new_params.sw_mtu = new_mtu;
err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto out;
if (new_params.xdp_prog && !mlx5e_params_validate_xdp(netdev, priv->mdev,
&new_params)) {
err = -EINVAL;
goto out;
}
if (priv->xsk.refcnt &&
!mlx5e_xsk_validate_mtu(netdev, &priv->channels,
&new_params, priv->mdev)) {
err = -EINVAL;
goto out;
}
if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
params->packet_merge.type != MLX5E_PACKET_MERGE_SHAMPO) {
bool is_linear_old = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, params, NULL);
bool is_linear_new = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev,
&new_params, NULL);
u8 sz_old = mlx5e_mpwqe_get_log_rq_size(priv->mdev, params, NULL);
u8 sz_new = mlx5e_mpwqe_get_log_rq_size(priv->mdev, &new_params, NULL);
/* Always reset in linear mode - hw_mtu is used in data path.
* Check that the mode was non-linear and didn't change.
* If XSK is active, XSK RQs are linear.
* Reset if the RQ size changed, even if it's non-linear.
*/
if (!is_linear_old && !is_linear_new && !priv->xsk.refcnt &&
sz_old == sz_new)
reset = false;
}
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
netdev->mtu = params->sw_mtu;
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
{
return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
{
bool set = *(bool *)ctx;
return mlx5e_ptp_rx_manage_fs(priv, set);
}
static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
{
bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
int err;
if (!rx_filter)
/* Reset CQE compression to Admin default */
return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
return 0;
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
if (err)
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
return err;
}
static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
{
struct mlx5e_params new_params;
if (ptp_rx == priv->channels.params.ptp_rx)
return 0;
new_params = priv->channels.params;
new_params.ptp_rx = ptp_rx;
return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
&new_params.ptp_rx, true);
}
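/* SIOCSHWTSTAMP handler: validate the TX/RX timestamping request and apply
 * it, either through the dedicated PTP RX path or by adjusting CQE
 * compression when the profile has no PTP-RX support.
 */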
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config config;
bool rx_cqe_compress_def;
bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
(mlx5_clock_get_ptp_index(priv->mdev) == -1))
return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
/* TX HW timestamp */
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
break;
default:
return -ERANGE;
}
mutex_lock(&priv->state_lock);
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
		/* ptp_rx is enabled only when HW timestamping is requested
		 * and CQE compression is set as the admin default.
		 */
ptp_rx = rx_cqe_compress_def;
break;
default:
err = -ERANGE;
goto err_unlock;
}
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
err = mlx5e_hwstamp_config_no_ptp_rx(priv,
config.rx_filter != HWTSTAMP_FILTER_NONE);
else
err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
if (err)
goto err_unlock;
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
/* might need to fix some features */
netdev_update_features(priv->netdev);
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
err_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct hwtstamp_config *cfg = &priv->tstamp;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return -EOPNOTSUPP;
return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
switch (cmd) {
case SIOCSHWTSTAMP:
return mlx5e_hwstamp_set(priv, ifr);
case SIOCGHWTSTAMP:
return mlx5e_hwstamp_get(priv, ifr);
default:
return -EOPNOTSUPP;
}
}
#ifdef CONFIG_MLX5_ESWITCH
int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
vlan, qos);
}
static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}
static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
int max_tx_rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
case MLX5_VPORT_ADMIN_STATE_DOWN:
return IFLA_VF_LINK_STATE_DISABLE;
case MLX5_VPORT_ADMIN_STATE_UP:
return IFLA_VF_LINK_STATE_ENABLE;
}
return IFLA_VF_LINK_STATE_AUTO;
}
static int mlx5_ifla_link2vport(u8 ifla_link)
{
switch (ifla_link) {
case IFLA_VF_LINK_STATE_DISABLE:
return MLX5_VPORT_ADMIN_STATE_DOWN;
case IFLA_VF_LINK_STATE_ENABLE:
return MLX5_VPORT_ADMIN_STATE_UP;
}
return MLX5_VPORT_ADMIN_STATE_AUTO;
}
static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
int link_state)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
if (mlx5e_is_uplink_rep(priv))
return -EOPNOTSUPP;
return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
mlx5_ifla_link2vport(link_state));
}
int mlx5e_get_vf_config(struct net_device *dev,
int vf, struct ifla_vf_info *ivi)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
if (!netif_device_present(dev))
return -EOPNOTSUPP;
err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
if (err)
return err;
ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
return 0;
}
int mlx5e_get_vf_stats(struct net_device *dev,
int vf, struct ifla_vf_stats *vf_stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
vf_stats);
}
static bool
mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
if (!netif_device_present(dev))
return false;
if (!mlx5e_is_uplink_rep(priv))
return false;
return mlx5e_rep_has_offload_stats(dev, attr_id);
}
static int
mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
void *sp)
{
struct mlx5e_priv *priv = netdev_priv(dev);
if (!mlx5e_is_uplink_rep(priv))
return -EOPNOTSUPP;
return mlx5e_rep_get_offload_stats(attr_id, dev, sp);
}
#endif
static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
{
switch (proto_type) {
case IPPROTO_GRE:
return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
case IPPROTO_IPIP:
case IPPROTO_IPV6:
return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
default:
return false;
}
}
static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
struct sk_buff *skb)
{
switch (skb->inner_protocol) {
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TEB):
return true;
case htons(ETH_P_MPLS_UC):
case htons(ETH_P_MPLS_MC):
return MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre);
}
return false;
}
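/* Per-skb tunnel offload check: keep checksum/GSO features only for tunnel
 * types the HW offloads (GRE, IP-in-IP, known VXLAN/GENEVE UDP ports, ESP);
 * otherwise mask them out.
 */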
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
{
unsigned int offset = 0;
struct udphdr *udph;
u8 proto;
u16 port;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
break;
default:
goto out;
}
switch (proto) {
case IPPROTO_GRE:
if (mlx5e_gre_tunnel_inner_proto_offload_supported(priv->mdev, skb))
return features;
break;
case IPPROTO_IPIP:
case IPPROTO_IPV6:
if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
return features;
break;
case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
return features;
#endif
break;
#ifdef CONFIG_MLX5_EN_IPSEC
case IPPROTO_ESP:
return mlx5e_ipsec_feature_check(skb, features);
#endif
}
out:
	/* Disable CSUM and GSO if the tunnel is not offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
(features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
return mlx5e_tunnel_features_check(priv, skb, features);
return features;
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
tx_timeout_work);
struct net_device *netdev = priv->netdev;
int i;
rtnl_lock();
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
for (i = 0; i < netdev->real_num_tx_queues; i++) {
struct netdev_queue *dev_queue =
netdev_get_tx_queue(netdev, i);
struct mlx5e_txqsq *sq = priv->txq2sq[i];
if (!netif_xmit_stopped(dev_queue))
continue;
if (mlx5e_reporter_tx_timeout(sq))
			/* break if the reporter tried to reopen the channels */
break;
}
unlock:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
}
static void mlx5e_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mlx5e_priv *priv = netdev_priv(dev);
netdev_err(dev, "TX timeout detected\n");
queue_work(priv->wq, &priv->tx_timeout_work);
}
static int mlx5e_xdp_allowed(struct net_device *netdev, struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
if (!mlx5e_params_validate_xdp(netdev, mdev, params))
return -EINVAL;
return 0;
}
static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
{
struct bpf_prog *old_prog;
old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
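/* Install or remove an XDP program. A full channel reset is needed only when
 * transitioning between "no program" and "program"; swapping one program for
 * another is done in place on the active RQs.
 */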
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_params new_params;
struct bpf_prog *old_prog;
int err = 0;
bool reset;
int i;
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
new_params.xdp_prog = prog;
if (prog) {
err = mlx5e_xdp_allowed(netdev, priv->mdev, &new_params);
if (err)
goto unlock;
}
/* no need for full reset when exchanging programs */
reset = (!priv->channels.params.xdp_prog || !prog);
old_prog = priv->channels.params.xdp_prog;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
if (err)
goto unlock;
if (old_prog)
bpf_prog_put(old_prog);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
	/* When exchanging programs without a reset, update the refcounts
	 * on behalf of the channels' RQs here.
	 */
bpf_prog_add(prog, priv->channels.num);
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
mlx5e_rq_replace_xdp_prog(&c->rq, prog);
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
bpf_prog_inc(prog);
mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
}
}
unlock:
mutex_unlock(&priv->state_lock);
/* Need to fix some features. */
if (!err)
netdev_update_features(netdev);
return err;
}
static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
case XDP_SETUP_XSK_POOL:
return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
}
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
int nlflags)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mode, setting;
int err;
err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting);
if (err)
return err;
mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
mode,
0, 0, nlflags, filter_mask, NULL);
}
static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 flags, struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
struct nlattr *attr, *br_spec;
u16 mode = BRIDGE_MODE_UNDEF;
u8 setting;
int rem;
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
break;
}
if (mode == BRIDGE_MODE_UNDEF)
return -EINVAL;
setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0;
return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting);
}
#endif
const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_setup_tc = mlx5e_setup_tc,
.ndo_select_queue = mlx5e_select_queue,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
.ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_eth_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit,
.ndo_xsk_wakeup = mlx5e_xsk_wakeup,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_MLX5_ESWITCH
.ndo_bridge_setlink = mlx5e_bridge_setlink,
.ndo_bridge_getlink = mlx5e_bridge_getlink,
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
.ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
.ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_has_offload_stats = mlx5e_has_offload_stats,
.ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
};
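/* Pick the smallest supported LRO timer period that is not below
 * wanted_timeout, falling back to the largest supported period.
 */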
static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
/* The supported periods are organized in ascending order */
for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
break;
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
priv->max_nch);
mlx5e_params_mqprio_reset(params);
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* XDP SQ */
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* set CQE compression */
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
MLX5_CAP_GEN(mdev, vport_group_manager))
params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
/* RQ */
mlx5e_build_rq_params(mdev, params);
params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* AF_XDP */
params->xsk = xsk;
	/* Do not update netdev->features directly here;
	 * mlx5e_attach_netdev() will call mlx5e_update_features().
	 * To change netdev->features, modify mlx5e_fix_features() instead.
	 */
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u8 addr[ETH_ALEN];
mlx5_query_mac_address(priv->mdev, addr);
if (is_zero_ether_addr(addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
return;
}
eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
}
static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
}
void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
{
if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
return;
priv->nic_info.set_port = mlx5e_vxlan_set_port;
priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
/* Don't count the space hard-coded to the IANA port */
priv->nic_info.tables[0].n_entries =
mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
}
static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
{
int tt;
for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt)))
return true;
}
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool fcs_supported;
bool fcs_enabled;
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
netdev->watchdog_timeo = 15 * HZ;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
netdev->vlan_features |= NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_HW_MACSEC;
netdev->vlan_features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
netdev->vlan_features |= NETIF_F_GSO_PARTIAL;
netdev->mpls_features |= NETIF_F_SG;
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->mpls_features |= NETIF_F_TSO;
netdev->mpls_features |= NETIF_F_TSO6;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
/* Tunneled LRO is not supported in the driver, and the same RQs are
* shared between inner and outer TIRs, so the driver can't disable LRO
* for inner TIRs while having it enabled for outer TIRs. Due to this,
* block LRO altogether if the firmware declares tunneled LRO support.
*/
if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
!MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
MLX5E_MPWRQ_UMR_MODE_ALIGNED))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
netdev->gso_partial_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
}
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6;
netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6;
netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_IPXIP6;
}
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
netdev->features |= NETIF_F_GSO_UDP_L4;
mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
if (fcs_supported)
netdev->hw_features |= NETIF_F_RXALL;
if (MLX5_CAP_ETH(mdev, scatter_fcs))
netdev->hw_features |= NETIF_F_RXFCS;
if (mlx5_qos_is_supported(mdev))
netdev->hw_features |= NETIF_F_HW_TC;
netdev->features = netdev->hw_features;
/* Defaults */
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
netdev->features &= ~NETIF_F_LRO;
netdev->features &= ~NETIF_F_GRO_HW;
netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) {
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
#ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
mlx5e_set_xdp_feature(netdev);
mlx5e_set_netdev_dev_addr(netdev);
mlx5e_macsec_build_netdev(priv);
mlx5e_ipsec_build_netdev(priv);
mlx5e_ktls_build_netdev(priv);
}
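/* Allocate two queue counters: one for the regular RQs (q_counter) and one
 * for the drop RQ. Allocation failures are tolerated; the counter id then
 * simply stays 0.
 */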
void mlx5e_create_q_counters(struct mlx5e_priv *priv)
{
u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
struct mlx5_core_dev *mdev = priv->mdev;
int err;
MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
if (!err)
priv->q_counter =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
err = mlx5_cmd_exec_inout(mdev, alloc_q_counter, in, out);
if (!err)
priv->drop_rq_q_counter =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
}
void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
if (priv->q_counter) {
MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
priv->q_counter);
mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
}
if (priv->drop_rq_q_counter) {
MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
priv->drop_rq_q_counter);
mlx5_cmd_exec_in(priv->mdev, dealloc_q_counter, in);
}
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
mlx5e_vxlan_set_netdev_info(priv);
mlx5e_timestamp_init(priv);
priv->dfs_root = debugfs_create_dir("nic",
mlx5_debugfs_get_dev_root(mdev));
fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state),
priv->dfs_root);
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
debugfs_remove_recursive(priv->dfs_root);
return err;
}
priv->fs = fs;
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_health_create_reporters(priv);
/* If netdev is already registered (e.g. move from uplink to nic profile),
* RTNL lock must be held before triggering netdev notifiers.
*/
if (take_rtnl)
rtnl_lock();
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
if (take_rtnl)
rtnl_unlock();
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
debugfs_remove_recursive(priv->dfs_root);
priv->fs = NULL;
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5e_rx_res_features features;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
return -ENOMEM;
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
goto err_destroy_q_counters;
}
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
priv->max_nch, priv->drop_rq.rqn,
&priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
priv->netdev);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_rx_res;
}
err = mlx5e_tc_nic_init(priv);
if (err)
goto err_destroy_flow_steering;
err = mlx5e_accel_init_rx(priv);
if (err)
goto err_tc_nic_cleanup;
#ifdef CONFIG_MLX5_EN_ARFS
priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
#endif
return 0;
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
return err;
}
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
}
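/* Recreate the mqprio rate limiter from the stored channel-mode params (if
 * any) when the TX side is (re)initialized; on failure, fall back to no RL.
 */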
static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
{
struct mlx5e_params *params;
struct mlx5e_mqprio_rl *rl;
params = &priv->channels.params;
if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL)
return;
rl = mlx5e_mqprio_rl_create(priv->mdev, params->mqprio.num_tc,
params->mqprio.channel.max_rate);
if (IS_ERR(rl))
rl = NULL;
priv->mqprio_rl = rl;
mlx5e_mqprio_rl_update_params(params, rl);
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
int err;
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
return err;
}
err = mlx5e_accel_init_tx(priv);
if (err)
goto err_destroy_tises;
mlx5e_set_mqprio_rl(priv);
mlx5e_dcbnl_initialize(priv);
return 0;
err_destroy_tises:
mlx5e_destroy_tises(priv);
return err;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
mlx5e_ipsec_init(priv);
err = mlx5e_macsec_init(priv);
if (err)
mlx5_core_err(mdev, "MACsec initialization failed, %d\n", err);
	/* The link is not currently needed by the driver; keep the port
	 * administratively down while the netdev is not running.
	 */
if (!netif_running(netdev))
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add_netdev(mdev, netdev);
mlx5e_enable_async_events(priv);
mlx5e_enable_blocking_events(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
mlx5e_dcbnl_init_app(priv);
mlx5e_nic_set_rx_mode(priv);
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
udp_tunnel_nic_reset_ntf(priv->netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
if (priv->netdev->reg_state == NETREG_REGISTERED)
mlx5e_dcbnl_delete_app(priv);
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
netif_device_detach(priv->netdev);
rtnl_unlock();
mlx5e_nic_set_rx_mode(priv);
mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_blocking_events(priv);
if (priv->en_trap) {
mlx5e_deactivate_trap(priv);
mlx5e_close_trap(priv->en_trap);
priv->en_trap = NULL;
}
mlx5e_disable_async_events(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
return mlx5e_refresh_tirs(priv, false, false);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
.init = mlx5e_nic_init,
.cleanup = mlx5e_nic_cleanup,
.init_rx = mlx5e_init_nic_rx,
.cleanup_rx = mlx5e_cleanup_nic_rx,
.init_tx = mlx5e_init_nic_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_rx = mlx5e_update_nic_rx,
.update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
int nch;
nch = mlx5e_get_max_num_channels(mdev);
if (profile->max_nch_limit)
nch = min_t(int, nch, profile->max_nch_limit(mdev));
return nch;
}
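/* Cap the number of channels by the device limit, the netdev RX queue count,
 * and the TX queues left per TC after reserving QoS and PTP SQs.
 */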
static unsigned int
mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev,
const struct mlx5e_profile *profile)
{
unsigned int max_nch, tmp;
/* core resources */
max_nch = mlx5e_profile_max_num_channels(mdev, profile);
/* netdev rx queues */
max_nch = min_t(unsigned int, max_nch, netdev->num_rx_queues);
/* netdev tx queues */
tmp = netdev->num_tx_queues;
if (mlx5_qos_is_supported(mdev))
tmp -= mlx5e_qos_max_leaf_nodes(mdev);
if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
tmp -= profile->max_tc;
tmp = tmp / profile->max_tc;
max_nch = min_t(unsigned int, max_nch, tmp);
return max_nch;
}
int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
{
	/* Indirect TIRs: two sets of TTCs (inner + outer steering),
	 * plus one set of direct TIRs (one per channel).
	 */
return 2 * MLX5E_NUM_INDIR_TIRS
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
}
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
int nch, num_txqs, node;
int err;
num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
node = dev_to_node(mlx5_core_dma_dev(mdev));
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
priv->max_nch = nch;
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
return -ENOMEM;
mutex_init(&priv->state_lock);
err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
if (err)
goto err_free_cpumask;
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
goto err_free_selq;
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
goto err_destroy_workqueue;
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
goto err_free_txq2sq;
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
goto err_free_tx_rates;
return 0;
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
err_free_selq:
mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
return -ENOMEM;
}
void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
int i;
	/* bail if the profile change failed and the rollback failed too */
if (!priv->mdev)
return;
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
kfree(priv->htb_qos_sq_stats[i]);
kvfree(priv->htb_qos_sq_stats);
memset(priv, 0, sizeof(*priv));
}
static unsigned int mlx5e_get_max_num_txqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
unsigned int nch, ptp_txqs, qos_txqs;
nch = mlx5e_profile_max_num_channels(mdev, profile);
ptp_txqs = MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) &&
mlx5e_profile_feature_cap(profile, PTP_TX) ?
profile->max_tc : 0;
qos_txqs = mlx5_qos_is_supported(mdev) &&
mlx5e_profile_feature_cap(profile, QOS_HTB) ?
mlx5e_qos_max_leaf_nodes(mdev) : 0;
return nch * profile->max_tc + ptp_txqs + qos_txqs;
}
static unsigned int mlx5e_get_max_num_rxqs(struct mlx5_core_dev *mdev,
const struct mlx5e_profile *profile)
{
return mlx5e_profile_max_num_channels(mdev, profile);
}
struct net_device *
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile)
{
struct net_device *netdev;
unsigned int txqs, rxqs;
int err;
txqs = mlx5e_get_max_num_txqs(mdev, profile);
rxqs = mlx5e_get_max_num_rxqs(mdev, profile);
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
err = mlx5e_priv_init(netdev_priv(netdev), profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
goto err_free_netdev;
}
netif_carrier_off(netdev);
netif_tx_disable(netdev);
dev_net_set(netdev, mlx5_core_net(mdev));
return netdev;
err_free_netdev:
free_netdev(netdev);
return NULL;
}
static void mlx5e_update_features(struct net_device *netdev)
{
if (netdev->reg_state != NETREG_REGISTERED)
return; /* features will be updated on netdev registration */
rtnl_lock();
netdev_update_features(netdev);
rtnl_unlock();
}
static void mlx5e_reset_channels(struct net_device *netdev)
{
netdev_reset_tc(netdev);
}
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
/* Validate the max_wqe_size_sq capability. */
if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %u\n",
mlx5e_get_max_sq_wqebbs(priv->mdev), (unsigned int)MLX5E_MAX_TX_WQEBBS);
return -EIO;
}
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
/* Reducing the number of channels - RXFH has to be reset, and
* mlx5e_num_channels_changed below will build the RQT.
*/
priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED;
priv->channels.params.num_channels = max_nch;
if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n");
mlx5e_params_mqprio_reset(&priv->channels.params);
}
}
if (max_nch != priv->max_nch) {
mlx5_core_warn(priv->mdev,
"MLX5E: Updating max number of channels from %u to %u\n",
priv->max_nch, max_nch);
priv->max_nch = max_nch;
}
/* 1. Set the real number of queues in the kernel the first time.
* 2. Set our default XPS cpumask.
* 3. Build the RQT.
*
* rtnl_lock is required by netif_set_real_num_*_queues in case the
* netdev has been registered by this point (if this function was called
* in the reload or resume flow).
*/
if (take_rtnl)
rtnl_lock();
err = mlx5e_num_channels_changed(priv);
if (take_rtnl)
rtnl_unlock();
if (err)
goto out;
err = profile->init_tx(priv);
if (err)
goto out;
err = profile->init_rx(priv);
if (err)
goto err_cleanup_tx;
if (profile->enable)
profile->enable(priv);
mlx5e_update_features(priv->netdev);
return 0;
err_cleanup_tx:
profile->cleanup_tx(priv);
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
cancel_work_sync(&priv->update_stats_work);
return err;
}
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
mlx5e_fs_set_state_destroy(priv->fs,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (profile->disable)
profile->disable(priv);
flush_workqueue(priv->wq);
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
static int
mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
const struct mlx5e_profile *new_profile, void *new_ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
err = mlx5e_priv_init(priv, new_profile, netdev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
return err;
}
netif_carrier_off(netdev);
priv->profile = new_profile;
priv->ppriv = new_ppriv;
err = new_profile->init(priv->mdev, priv->netdev);
if (err)
goto priv_cleanup;
return 0;
priv_cleanup:
mlx5e_priv_cleanup(priv);
return err;
}
static int
mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
const struct mlx5e_profile *new_profile, void *new_ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
if (err)
return err;
err = mlx5e_attach_netdev(priv);
if (err)
goto profile_cleanup;
return err;
profile_cleanup:
new_profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
return err;
}
int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
const struct mlx5e_profile *new_profile, void *new_ppriv)
{
const struct mlx5e_profile *orig_profile = priv->profile;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
void *orig_ppriv = priv->ppriv;
int err, rollback_err;
/* cleanup old profile */
mlx5e_detach_netdev(priv);
priv->profile->cleanup(priv);
mlx5e_priv_cleanup(priv);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
return -EIO;
}
err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
if (err) { /* roll back to original profile */
netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
goto rollback;
}
return 0;
rollback:
rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
if (rollback_err)
netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
__func__, rollback_err);
return err;
}
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv)
{
mlx5e_netdev_change_profile(priv, &mlx5e_nic_profile, NULL);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
if (netif_device_present(netdev))
return 0;
err = mlx5e_create_mdev_resources(mdev);
if (err)
return err;
err = mlx5e_attach_netdev(priv);
if (err) {
mlx5e_destroy_mdev_resources(mdev);
return err;
}
return 0;
}
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
if (!netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5e_destroy_mdev_resources(mdev);
return -ENODEV;
}
mlx5e_detach_netdev(priv);
mlx5e_destroy_mdev_resources(mdev);
return 0;
}
static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
if (IS_ERR(mlx5e_dev))
return PTR_ERR(mlx5e_dev);
auxiliary_set_drvdata(adev, mlx5e_dev);
err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
goto err_devlink_unregister;
}
netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
err = -ENOMEM;
goto err_devlink_port_unregister;
}
SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
mlx5e_dev->priv = priv;
priv->profile = profile;
priv->ppriv = NULL;
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
goto err_destroy_netdev;
}
err = mlx5e_resume(adev);
if (err) {
mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
goto err_profile_cleanup;
}
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_resume;
}
mlx5e_dcbnl_init_app(priv);
mlx5_core_uplink_netdev_set(mdev, netdev);
mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
err_resume:
mlx5e_suspend(adev, state);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
err_devlink_port_unregister:
mlx5e_devlink_port_unregister(mlx5e_dev);
err_devlink_unregister:
mlx5e_destroy_devlink(mlx5e_dev);
return err;
}
static void mlx5e_remove(struct auxiliary_device *adev)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
pm_message_t state = {};
mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
mlx5e_destroy_devlink(mlx5e_dev);
}
static const struct auxiliary_device_id mlx5e_id_table[] = {
{ .name = MLX5_ADEV_NAME ".eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
static struct auxiliary_driver mlx5e_driver = {
.name = "eth",
.probe = mlx5e_probe,
.remove = mlx5e_remove,
.suspend = mlx5e_suspend,
.resume = mlx5e_resume,
.id_table = mlx5e_id_table,
};
int mlx5e_init(void)
{
int ret;
mlx5e_build_ptys2ethtool_map();
ret = auxiliary_driver_register(&mlx5e_driver);
if (ret)
return ret;
ret = mlx5e_rep_init();
if (ret)
auxiliary_driver_unregister(&mlx5e_driver);
return ret;
}
void mlx5e_cleanup(void)
{
mlx5e_rep_cleanup();
auxiliary_driver_unregister(&mlx5e_driver);
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/ethtool_netlink.h>
#include "en.h"
#include "en/port.h"
#include "en/params.h"
#include "en/ptp.h"
#include "lib/clock.h"
#include "en/fs_ethtool.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}
static void mlx5e_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_drvinfo(priv, drvinfo);
}
struct ptys2ethtool_config {
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
};
static
struct ptys2ethtool_config ptys2legacy_ethtool_table[MLX5E_LINK_MODES_NUMBER];
static
struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER];
#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) \
({ \
struct ptys2ethtool_config *cfg; \
const unsigned int modes[] = { __VA_ARGS__ }; \
unsigned int i, bit, idx; \
cfg = &ptys2##table##_ethtool_table[reg_]; \
bitmap_zero(cfg->supported, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
bitmap_zero(cfg->advertised, \
__ETHTOOL_LINK_MODE_MASK_NBITS); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
bit = modes[i] % 64; \
idx = modes[i] / 64; \
__set_bit(bit, &cfg->supported[idx]); \
__set_bit(bit, &cfg->advertised[idx]); \
} \
})
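/* Editor's note -- illustration, not upstream text: the macro above scatters
* each ethtool link-mode bit number over the unsigned long array behind
* __ETHTOOL_DECLARE_LINK_MODE_MASK(), using a fixed 64-bit word size. For a
* hypothetical mode bit number 70:
*
*	idx = 70 / 64 = 1;	word 1 of the mask
*	bit = 70 % 64 = 6;	bit 6 within that word
*	__set_bit(bit, &cfg->supported[idx]);
*
* so bit numbers 0..63 land in word 0, 64..127 in word 1, and so on.
*/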
void mlx5e_build_ptys2ethtool_map(void)
{
memset(ptys2legacy_ethtool_table, 0, sizeof(ptys2legacy_ethtool_table));
memset(ptys2ext_ethtool_table, 0, sizeof(ptys2ext_ethtool_table));
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, legacy,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, legacy,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, legacy,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, legacy,
ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, legacy,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, legacy,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, legacy,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, legacy,
ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, legacy,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, legacy,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, legacy,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, legacy,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, legacy,
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, legacy,
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, legacy,
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, legacy,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, legacy,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, legacy,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_SGMII_100M, ext,
ETHTOOL_LINK_MODE_100baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_X_SGMII, ext,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_5GBASE_R, ext,
ETHTOOL_LINK_MODE_5000baseT_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_XFI_XAUI_1, ext,
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
ETHTOOL_LINK_MODE_10000baseER_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_XLAUI_4_XLPPI_4, ext,
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GAUI_1_25GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
ext,
ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
ETHTOOL_LINK_MODE_50000baseDR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_CAUI_4_100GBASE_CR4_KR4, ext,
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_2_100GBASE_CR2_KR2, ext,
ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_4_200GBASE_CR4_KR4, ext,
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseDR_Full_BIT,
ETHTOOL_LINK_MODE_100000baseCR_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_2_200GBASE_CR2_KR2, ext,
ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT,
ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT);
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_4_400GBASE_CR4_KR4, ext,
ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
struct ptys2ethtool_config **arr,
u32 *size)
{
bool ext = mlx5_ptys_ext_supported(mdev);
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
}
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
struct pflag_desc {
char name[ETH_GSTRING_LEN];
mlx5e_pflag_handler handler;
};
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return mlx5e_stats_total_num(priv);
case ETH_SS_PRIV_FLAGS:
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
default:
return -EOPNOTSUPP;
}
}
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_get_sset_count(priv, sset);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
{
int i;
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
strcpy(data + i * ETH_GSTRING_LEN,
mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
mlx5e_self_test_fill_strings(priv, data);
break;
case ETH_SS_STATS:
mlx5e_stats_fill_strings(priv, data);
break;
}
}
static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_strings(priv, stringset, data);
}
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
int idx = 0;
mutex_lock(&priv->state_lock);
mlx5e_stats_update(priv);
mutex_unlock(&priv->state_lock);
mlx5e_stats_fill(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats,
u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
}
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param)
{
/* Limitation for regular RQ. XSK RQ may clamp the queue length in
* mlx5e_mpwqe_get_log_rq_size.
*/
u8 max_log_mpwrq_pkts = mlx5e_mpwrq_max_log_rq_pkts(priv->mdev,
PAGE_SHIFT,
MLX5E_MPWRQ_UMR_MODE_ALIGNED);
param->rx_max_pending = 1 << min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
max_log_mpwrq_pkts);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->channels.params.log_rq_mtu_frames;
param->tx_pending = 1 << priv->channels.params.log_sq_size;
kernel_param->tcp_data_split =
(priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) ?
ETHTOOL_TCP_DATA_SPLIT_ENABLED :
ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ringparam(priv, param, kernel_param);
}
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param)
{
struct mlx5e_params new_params;
u8 log_rq_size;
u8 log_sq_size;
int err = 0;
if (param->rx_jumbo_pending) {
netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_mini_pending) {
netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n",
__func__);
return -EINVAL;
}
if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
return -EINVAL;
}
log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
if (log_rq_size == priv->channels.params.log_rq_mtu_frames &&
log_sq_size == priv->channels.params.log_sq_size)
return 0;
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
new_params.log_rq_mtu_frames = log_rq_size;
new_params.log_sq_size = log_sq_size;
err = mlx5e_validate_params(priv->mdev, &new_params);
if (err)
goto unlock;
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
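/* Editor's note -- worked example, not upstream text: ring sizes are stored
* as log2 values, so a request is rounded up to the next power of two by
* order_base_2() above. Assuming the min/max bounds are met:
*
*	order_base_2(1024) == 10	-> 1024 RX descriptors
*	order_base_2(1000) == 10	-> rounded up to 1024
*	order_base_2(1025) == 11	-> rounded up to 2048
*
* This is also why the early-out compares log values rather than the raw
* ethtool_ringparam counts.
*/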
static int mlx5e_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_ringparam(priv, param);
}
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
mutex_lock(&priv->state_lock);
ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
mutex_unlock(&priv->state_lock);
}
static void mlx5e_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_channels(priv, ch);
}
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch)
{
struct mlx5e_params *cur_params = &priv->channels.params;
unsigned int count = ch->combined_count;
struct mlx5e_params new_params;
bool arfs_enabled;
int rss_cnt;
bool opened;
int err = 0;
if (!count) {
netdev_info(priv->netdev, "%s: combined_count=0 not supported\n",
__func__);
return -EINVAL;
}
if (cur_params->num_channels == count)
return 0;
mutex_lock(&priv->state_lock);
/* Don't allow changing the number of channels if HTB offload is active,
* because the numbering of the QoS SQs will change while per-queue
* qdiscs are still attached.
*/
if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
__func__);
goto out;
}
/* Don't allow changing the number of channels if non-default RSS contexts exist;
* the kernel doesn't protect against set_channels operations that would break them.
*/
rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1;
if (rss_cnt) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n",
__func__, rss_cnt);
goto out;
}
/* Don't allow changing the number of channels if MQPRIO mode channel offload is active,
* because it defines a partition over the channel queues.
*/
if (cur_params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
err = -EINVAL;
netdev_err(priv->netdev, "%s: MQPRIO mode channel offload is active, cannot change the number of channels\n",
__func__);
goto out;
}
new_params = *cur_params;
new_params.num_channels = count;
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
if (arfs_enabled)
mlx5e_arfs_disable(priv->fs);
/* Switch to new channels, set new parameters and close old ones */
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (arfs_enabled) {
int err2 = mlx5e_arfs_enable(priv->fs);
if (err2)
netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n",
__func__, err2);
}
out:
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_channels(priv, ch);
}
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal)
{
struct dim_cq_moder *rx_moder, *tx_moder;
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
rx_moder = &priv->channels.params.rx_cq_moderation;
coal->rx_coalesce_usecs = rx_moder->usec;
coal->rx_max_coalesced_frames = rx_moder->pkts;
coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
tx_moder = &priv->channels.params.tx_cq_moderation;
coal->tx_coalesce_usecs = tx_moder->usec;
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
kernel_coal->use_cqe_mode_rx =
MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
kernel_coal->use_cqe_mode_tx =
MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
return 0;
}
static int mlx5e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
struct mlx5_core_dev *mdev = priv->mdev;
int tc;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
for (tc = 0; tc < c->num_tc; tc++) {
mlx5_core_modify_cq_moderation(mdev,
&c->sq[tc].cq.mcq,
coal->tx_coalesce_usecs,
coal->tx_max_coalesced_frames);
}
}
}
static void
mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
struct mlx5_core_dev *mdev = priv->mdev;
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
coal->rx_max_coalesced_frames);
}
}
/* convert a boolean value of cq_mode to mlx5 period mode
* true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
* false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
*/
static int cqe_mode_to_period_mode(bool val)
{
return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
bool reset = true;
u8 cq_period_mode;
int err = 0;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
__func__, MLX5E_MAX_COAL_TIME);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
__func__, MLX5E_MAX_COAL_FRAMES);
return -ERANGE;
}
if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
!MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) {
NL_SET_ERR_MSG_MOD(extack, "cqe_mode_rx/tx is not supported on this device");
return -EOPNOTSUPP;
}
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
rx_moder = &new_params.rx_cq_moderation;
rx_moder->usec = coal->rx_coalesce_usecs;
rx_moder->pkts = coal->rx_max_coalesced_frames;
new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
tx_moder = &new_params.tx_cq_moderation;
tx_moder->usec = coal->tx_coalesce_usecs;
tx_moder->pkts = coal->tx_max_coalesced_frames;
new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
if (cq_period_mode != rx_moder->cq_period_mode) {
mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
reset_rx = true;
}
cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
if (cq_period_mode != tx_moder->cq_period_mode) {
mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
reset_tx = true;
}
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
mlx5e_reset_rx_moderation(&new_params, mode);
}
if (reset_tx) {
u8 mode = MLX5E_GET_PFLAG(&new_params,
MLX5E_PFLAG_TX_CQE_BASED_MODER);
mlx5e_reset_tx_moderation(&new_params, mode);
}
/* If DIM state hasn't changed, it's possible to modify interrupt
* moderation parameters on the fly, even if the channels are open.
*/
if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
if (!coal->use_adaptive_rx_coalesce)
mlx5e_set_priv_channels_rx_coalesce(priv, coal);
if (!coal->use_adaptive_tx_coalesce)
mlx5e_set_priv_channels_tx_coalesce(priv, coal);
reset = false;
}
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
mutex_unlock(&priv->state_lock);
return err;
}
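/* Editor's note -- summary of the logic above, not upstream text: a full
* channel switch is only required when the adaptive-moderation (DIM) enable
* state or the CQE/EQE period mode changes for a direction. If only the
* usec/frame limits change while the channels are open, they are pushed to
* the existing CQs via mlx5_core_modify_cq_moderation() and
* mlx5e_safe_switch_params() runs with reset == false.
*/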
static int mlx5e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
unsigned long *supported_modes,
u32 eth_proto_cap)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(supported_modes, supported_modes,
table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
ARRAY_SIZE(ptys2legacy_ethtool_table);
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
[MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
{
int mode = 0;
if (!fec_mode)
return ETHTOOL_FEC_AUTO;
mode = find_first_bit(&fec_mode, size);
if (mode < ARRAY_SIZE(pplm_fec_2_ethtool))
return pplm_fec_2_ethtool[mode];
return 0;
}
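/* Editor's note -- illustration, not upstream text: only the first set bit of
* the PPLM FEC field is translated; e.g. BIT(MLX5E_FEC_RS_528_514) maps to
* ETHTOOL_FEC_RS, while an all-zero field is reported as ETHTOOL_FEC_AUTO
* (no explicit FEC override configured).
*/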
#define MLX5E_ADVERTISE_SUPPORTED_FEC(mlx5_fec, ethtool_fec) \
do { \
if (mlx5e_fec_in_caps(dev, 1 << (mlx5_fec))) \
__set_bit(ethtool_fec, \
link_ksettings->link_modes.supported);\
} while (0)
static const u32 pplm_fec_2_ethtool_linkmodes[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
[MLX5E_FEC_FIRECODE] = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
[MLX5E_FEC_RS_528_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
[MLX5E_FEC_RS_544_514] = ETHTOOL_LINK_MODE_FEC_RS_BIT,
[MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
};
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long active_fec_long;
u32 active_fec;
u32 bitn;
int err;
err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_NOFEC,
ETHTOOL_LINK_MODE_FEC_NONE_BIT);
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_FIRECODE,
ETHTOOL_LINK_MODE_FEC_BASER_BIT);
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_RS_528_514,
ETHTOOL_LINK_MODE_FEC_RS_BIT);
MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1,
ETHTOOL_LINK_MODE_FEC_LLRS_BIT);
active_fec_long = active_fec;
/* active_fec is a bitmask; find which bit is set and
* advertise the corresponding ethtool link mode bit.
*/
bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE);
if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes))
__set_bit(pplm_fec_2_ethtool_linkmodes[bitn],
link_ksettings->link_modes.advertising);
return 0;
}
static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap, u8 connector_type)
{
if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
| MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported,
FIBRE);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising,
FIBRE);
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported,
Backplane);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising,
Backplane);
}
return;
}
switch (connector_type) {
case MLX5E_PORT_TP:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, TP);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, TP);
break;
case MLX5E_PORT_AUI:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, AUI);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, AUI);
break;
case MLX5E_PORT_BNC:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, BNC);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, BNC);
break;
case MLX5E_PORT_MII:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, MII);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, MII);
break;
case MLX5E_PORT_FIBRE:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, FIBRE);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, FIBRE);
break;
case MLX5E_PORT_DA:
ethtool_link_ksettings_add_link_mode(link_ksettings,
supported, Backplane);
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Backplane);
break;
case MLX5E_PORT_NONE:
case MLX5E_PORT_OTHER:
default:
break;
}
}
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper, bool force_legacy,
u16 data_rate_oper,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
if (!netif_carrier_ok(netdev))
goto out;
speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
if (data_rate_oper)
speed = 100 * data_rate_oper;
else
speed = SPEED_UNKNOWN;
goto out;
}
duplex = DUPLEX_FULL;
out:
link_ksettings->base.speed = speed;
link_ksettings->base.duplex = duplex;
}
static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *supported = link_ksettings->link_modes.supported;
ptys2ethtool_supported_link(mdev, supported, eth_proto_cap);
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
struct ethtool_link_ksettings *link_ksettings,
bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
if (tx_pause ^ rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
[MLX5E_PORT_UNKNOWN] = PORT_OTHER,
[MLX5E_PORT_NONE] = PORT_NONE,
[MLX5E_PORT_TP] = PORT_TP,
[MLX5E_PORT_AUI] = PORT_AUI,
[MLX5E_PORT_BNC] = PORT_BNC,
[MLX5E_PORT_MII] = PORT_MII,
[MLX5E_PORT_FIBRE] = PORT_FIBRE,
[MLX5E_PORT_DA] = PORT_DA,
[MLX5E_PORT_OTHER] = PORT_OTHER,
};
static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
{
if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
return ptys2connector_type[connector_type];
if (eth_proto &
(MLX5E_PROT_MASK(MLX5E_10GBASE_SR) |
MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) |
MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) |
MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
return PORT_FIBRE;
}
if (eth_proto &
(MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) |
MLX5E_PROT_MASK(MLX5E_10GBASE_CR) |
MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
return PORT_DA;
}
if (eth_proto &
(MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) |
MLX5E_PROT_MASK(MLX5E_10GBASE_KR) |
MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) |
MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
return PORT_NONE;
}
return PORT_OTHER;
}
static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
bool ext = mlx5_ptys_ext_supported(mdev);
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
u32 eth_proto_admin;
u8 an_disable_admin;
u16 data_rate_oper;
u32 eth_proto_oper;
u32 eth_proto_cap;
u8 connector_type;
u32 rx_pause = 0;
u32 tx_pause = 0;
u32 eth_proto_lp;
bool admin_ext;
u8 an_status;
bool ext;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
netdev_err(priv->netdev, "%s: query port ptys failed: %d\n",
__func__, err);
goto err_query_regs;
}
ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
/* Fields: eth_proto_admin and ext_eth_proto_admin are
* mutually exclusive. Hence try reading legacy advertising
* when extended advertising is zero.
* admin_ext indicates which proto_admin (ext vs. legacy)
* should be read and interpreted
*/
admin_ext = ext;
if (ext && !eth_proto_admin) {
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
eth_proto_admin);
admin_ext = false;
}
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
an_status = MLX5_GET(ptys_reg, out, an_status);
connector_type = MLX5_GET(ptys_reg, out, connector_type);
data_rate_oper = MLX5_GET(ptys_reg, out, data_rate_oper);
mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
data_rate_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
connector_type : MLX5E_PORT_UNKNOWN;
link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
connector_type);
get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
if (an_status == MLX5_AN_COMPLETE)
ethtool_link_ksettings_add_link_mode(link_ksettings,
lp_advertising, Autoneg);
link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
AUTONEG_ENABLE;
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
err = get_fec_supported_advertised(mdev, link_ksettings);
if (err) {
netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
err = 0; /* don't fail caps query because of FEC error */
}
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
err_query_regs:
return err;
}
static int mlx5e_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
const unsigned long link_modes, u8 autoneg)
{
/* Extended link-mode has no speed limitations. */
if (ext)
return 0;
if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
autoneg != AUTONEG_ENABLE) {
netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
__func__);
return -EINVAL;
}
return 0;
}
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
if (*ptys2legacy_ethtool_table[i].advertised == 0)
continue;
if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised,
link_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
}
static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
unsigned long modes[2];
for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
ptys2ext_ethtool_table[i].advertised[1] == 0)
continue;
memset(modes, 0, sizeof(modes));
bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] &&
modes[1] == ptys2ext_ethtool_table[i].advertised[1])
ptys_modes |= MLX5E_PROT_MASK(i);
}
return ptys_modes;
}
static bool ext_link_mode_requested(const unsigned long *adver)
{
#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported)
{
bool ext_link_mode = ext_link_mode_requested(adver);
return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
}
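/* Editor's note -- illustrative sketch, not part of the upstream driver:
* ext_link_mode_requested() treats every advertised bit from
* ETHTOOL_LINK_MODE_50000baseKR_Full_BIT up to the end of the link-mode mask
* as belonging to the extended PTYS namespace. With autoneg disabled there is
* no advertising mask to inspect, so ext_requested() simply follows whether
* the device supports the extended register at all.
*/
static inline bool mlx5e_example_ext_mode_check(void)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adver) = {0,};

/* Hypothetical request: advertise only 100GBASE-SR2, an extended-only mode */
__set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, adver);
return ext_link_mode_requested(adver); /* true: extended PTYS must be used */
}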
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
u8 autoneg;
u32 speed;
bool ext;
int err;
u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
adver = link_ksettings->link_modes.advertising;
autoneg = link_ksettings->base.autoneg;
speed = link_ksettings->base.speed;
ext_supported = mlx5_ptys_ext_supported(mdev);
ext = ext_requested(autoneg, adver, ext_supported);
if (!ext_supported && ext)
return -EOPNOTSUPP;
ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5_port_speed2linkmodes(mdev, speed, !ext);
err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
if (err)
goto out;
link_modes = link_modes & eproto.cap;
if (!link_modes) {
netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
err = -EINVAL;
goto out;
}
mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
&an_disable_admin);
an_disable = autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
if (!an_changes && link_modes == eproto.admin)
goto out;
mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
mlx5_toggle_port_link(mdev);
out:
return err;
}
static int mlx5e_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
return sizeof_field(struct mlx5e_rss_params_hash, toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_rxfh_key_size(priv);
}
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
{
return MLX5E_INDIR_RQT_SIZE;
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
u8 *key, u8 *hfunc, u32 rss_context)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc,
u32 *rss_context, bool delete)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
mutex_lock(&priv->state_lock);
if (delete) {
err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
goto unlock;
}
if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
unsigned int count = priv->channels.params.num_channels;
err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count);
if (err)
goto unlock;
}
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
}
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
mutex_unlock(&priv->state_lock);
return err;
}
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
#define MLX5E_PFC_PREVEN_TOUT_MIN_MSEC 80
#define MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout) \
max_t(u16, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC, \
(critical_tout * MLX5E_PFC_PREVEN_MINOR_PRECENT) / 100)
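/* Editor's note -- worked example, not upstream text: the minor watermark is
* 85% of the critical timeout, clamped to the 80 msec floor, e.g.:
*
*	critical_tout = 100  -> (100 * 85) / 100 = 85    -> minor = 85 msec
*	critical_tout = 8000 -> (8000 * 85) / 100 = 6800 -> minor = 6800 msec
*	critical_tout = 90   -> (90 * 85) / 100 = 76     -> clamped to 80 msec
*/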
static int mlx5e_get_pfc_prevention_tout(struct net_device *netdev,
u16 *pfc_prevention_tout)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
!MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
return -EOPNOTSUPP;
return mlx5_query_port_stall_watermark(mdev, pfc_prevention_tout, NULL);
}
static int mlx5e_set_pfc_prevention_tout(struct net_device *netdev,
u16 pfc_preven)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u16 critical_tout;
u16 minor;
if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) ||
!MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
return -EOPNOTSUPP;
critical_tout = (pfc_preven == PFC_STORM_PREVENTION_AUTO) ?
MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC :
pfc_preven;
if (critical_tout != PFC_STORM_PREVENTION_DISABLE &&
(critical_tout > MLX5E_PFC_PREVEN_TOUT_MAX_MSEC ||
critical_tout < MLX5E_PFC_PREVEN_TOUT_MIN_MSEC)) {
netdev_info(netdev, "%s: pfc prevention tout not in range (%d-%d)\n",
__func__, MLX5E_PFC_PREVEN_TOUT_MIN_MSEC,
MLX5E_PFC_PREVEN_TOUT_MAX_MSEC);
return -EINVAL;
}
minor = MLX5E_DEVICE_STALL_MINOR_WATERMARK(critical_tout);
return mlx5_set_port_stall_watermark(mdev, critical_tout,
minor);
}
static int mlx5e_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
void *data)
{
int err;
switch (tuna->id) {
case ETHTOOL_PFC_PREVENTION_TOUT:
err = mlx5e_get_pfc_prevention_tout(dev, data);
break;
default:
err = -EINVAL;
break;
}
return err;
}
static int mlx5e_set_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err;
mutex_lock(&priv->state_lock);
switch (tuna->id) {
case ETHTOOL_PFC_PREVENTION_TOUT:
err = mlx5e_set_pfc_prevention_tout(dev, *(u16 *)data);
break;
default:
err = -EINVAL;
break;
}
mutex_unlock(&priv->state_lock);
return err;
}
static void mlx5e_get_pause_stats(struct net_device *netdev,
struct ethtool_pause_stats *pause_stats)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_pause_get(priv, pause_stats);
}
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
&pauseparam->tx_pause);
if (err) {
netdev_err(priv->netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
}
}
static void mlx5e_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pauseparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EOPNOTSUPP;
if (pauseparam->autoneg)
return -EINVAL;
err = mlx5_set_port_pause(mdev,
pauseparam->rx_pause ? 1 : 0,
pauseparam->tx_pause ? 1 : 0);
if (err) {
netdev_err(priv->netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
__func__, err);
}
return err;
}
static int mlx5e_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pauseparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
info->phc_index = mlx5_clock_get_ptp_index(mdev);
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
info->phc_index == -1)
return 0;
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
static int mlx5e_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_get_ts_info(priv, info);
}
static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
{
__u32 ret = 0;
if (MLX5_CAP_GEN(mdev, wol_g))
ret |= WAKE_MAGIC;
if (MLX5_CAP_GEN(mdev, wol_s))
ret |= WAKE_MAGICSECURE;
if (MLX5_CAP_GEN(mdev, wol_a))
ret |= WAKE_ARP;
if (MLX5_CAP_GEN(mdev, wol_b))
ret |= WAKE_BCAST;
if (MLX5_CAP_GEN(mdev, wol_m))
ret |= WAKE_MCAST;
if (MLX5_CAP_GEN(mdev, wol_u))
ret |= WAKE_UCAST;
if (MLX5_CAP_GEN(mdev, wol_p))
ret |= WAKE_PHY;
return ret;
}
static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
{
__u32 ret = 0;
if (mode & MLX5_WOL_MAGIC)
ret |= WAKE_MAGIC;
if (mode & MLX5_WOL_SECURED_MAGIC)
ret |= WAKE_MAGICSECURE;
if (mode & MLX5_WOL_ARP)
ret |= WAKE_ARP;
if (mode & MLX5_WOL_BROADCAST)
ret |= WAKE_BCAST;
if (mode & MLX5_WOL_MULTICAST)
ret |= WAKE_MCAST;
if (mode & MLX5_WOL_UNICAST)
ret |= WAKE_UCAST;
if (mode & MLX5_WOL_PHY_ACTIVITY)
ret |= WAKE_PHY;
return ret;
}
static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
{
u8 ret = 0;
if (mode & WAKE_MAGIC)
ret |= MLX5_WOL_MAGIC;
if (mode & WAKE_MAGICSECURE)
ret |= MLX5_WOL_SECURED_MAGIC;
if (mode & WAKE_ARP)
ret |= MLX5_WOL_ARP;
if (mode & WAKE_BCAST)
ret |= MLX5_WOL_BROADCAST;
if (mode & WAKE_MCAST)
ret |= MLX5_WOL_MULTICAST;
if (mode & WAKE_UCAST)
ret |= MLX5_WOL_UNICAST;
if (mode & WAKE_PHY)
ret |= MLX5_WOL_PHY_ACTIVITY;
return ret;
}
static void mlx5e_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 mlx5_wol_mode;
int err;
memset(wol, 0, sizeof(*wol));
wol->supported = mlx5e_get_wol_supported(mdev);
if (!wol->supported)
return;
err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
if (err)
return;
wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
}
static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
__u32 wol_supported = mlx5e_get_wol_supported(mdev);
u32 mlx5_wol_mode;
if (!wol_supported)
return -EOPNOTSUPP;
if (wol->wolopts & ~wol_supported)
return -EINVAL;
mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
static void mlx5e_get_fec_stats(struct net_device *netdev,
struct ethtool_fec_stats *fec_stats)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_fec_get(priv, fec_stats);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u16 fec_configured;
u32 fec_active;
int err;
err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
if (err)
return err;
fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active,
sizeof(unsigned long) * BITS_PER_BYTE);
if (!fecparam->active_fec)
return -EOPNOTSUPP;
fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured,
sizeof(unsigned long) * BITS_PER_BYTE);
return 0;
}
static int mlx5e_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
unsigned long fec_bitmap;
u16 fec_policy = 0;
int mode;
int err;
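/* Only a single FEC mode can be requested at a time: copy the ethtool
* request into a bitmap, reject requests with more than one bit set and
* translate the chosen bit into the matching PPLM fec_policy bit.
*/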
bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
return -EOPNOTSUPP;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
continue;
fec_policy |= (1 << mode);
break;
}
err = mlx5e_set_fec_mode(mdev, fec_policy);
if (err)
return err;
mlx5_toggle_port_link(mdev);
return 0;
}
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u16 beacon_duration;
if (!MLX5_CAP_GEN(mdev, beacon_led))
return -EOPNOTSUPP;
switch (state) {
case ETHTOOL_ID_ACTIVE:
beacon_duration = MLX5_BEACON_DURATION_INF;
break;
case ETHTOOL_ID_INACTIVE:
beacon_duration = MLX5_BEACON_DURATION_OFF;
break;
default:
return -EOPNOTSUPP;
}
return mlx5_set_port_beacon(mdev, beacon_duration);
}
static int mlx5e_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
int size_read = 0;
u8 data[4] = {0};
size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
if (size_read < 2)
return -EIO;
/* data[0] = identifier byte */
switch (data[0]) {
case MLX5_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
/* data[1] = revision id */
if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
modinfo->type = ETH_MODULE_SFF_8636;
modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
__func__, data[0]);
return -EINVAL;
}
return 0;
}
static int mlx5e_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee,
u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int offset = ee->offset;
int size_read;
int i = 0;
if (!ee->len)
return -EINVAL;
memset(data, 0, ee->len);
while (i < ee->len) {
size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
data + i);
if (!size_read)
/* Done reading */
return 0;
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
return size_read;
}
i += size_read;
offset += size_read;
}
return 0;
}
static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev,
const struct ethtool_module_eeprom *page_data,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_module_eeprom_query_params query;
struct mlx5_core_dev *mdev = priv->mdev;
u8 *data = page_data->data;
int size_read;
int i = 0;
if (!page_data->length)
return -EINVAL;
memset(data, 0, page_data->length);
query.offset = page_data->offset;
query.i2c_address = page_data->i2c_address;
query.bank = page_data->bank;
query.page = page_data->page;
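/* Read the requested page in chunks. A zero-length read means the device
* returned less data than asked for, so report how many bytes were read;
* -EINVAL is fatal, any other error also ends the read with the bytes
* gathered so far.
*/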
while (i < page_data->length) {
query.size = page_data->length - i;
size_read = mlx5_query_module_eeprom_by_page(mdev, &query, data + i);
/* Done reading, return how many bytes were read */
if (!size_read)
return i;
if (size_read == -EINVAL)
return -EINVAL;
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom_by_page failed:0x%x\n",
__func__, size_read);
return i;
}
i += size_read;
query.offset += size_read;
}
return i;
}
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *dev = priv->netdev;
const struct firmware *fw;
int err;
if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
return -EOPNOTSUPP;
err = request_firmware_direct(&fw, flash->data, &dev->dev);
if (err)
return err;
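/* Firmware flashing can take a long time: drop the RTNL lock so other
* netlink operations are not blocked, and hold the netdev so it cannot
* be unregistered while the lock is released.
*/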
dev_hold(dev);
rtnl_unlock();
err = mlx5_firmware_flash(mdev, fw, NULL);
release_firmware(fw);
rtnl_lock();
dev_put(dev);
return err;
}
static int mlx5e_flash_device(struct net_device *dev,
struct ethtool_flash *flash)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_flash_device(priv, flash);
}
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u8 cq_period_mode, current_cq_period_mode;
struct mlx5e_params new_params;
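/* The {rx,tx}_cqe_moder private flags select whether the CQ moderation
* timer restarts from the last CQE or from the last EQE; changing the
* mode requires switching the channels via mlx5e_safe_switch_params().
*/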
if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
cq_period_mode = cqe_mode_to_period_mode(enable);
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
priv->channels.params.tx_cq_moderation.cq_period_mode;
if (cq_period_mode == current_cq_period_mode)
return 0;
new_params = priv->channels.params;
if (is_rx_cq)
mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
else
mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
{
return set_pflag_cqe_based_moder(netdev, enable, false);
}
static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
{
return set_pflag_cqe_based_moder(netdev, enable, true);
}
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
struct mlx5e_params new_params;
int err = 0;
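/* When an RX HW timestamping filter is set, toggling CQE compression also
* toggles the dedicated PTP RX queue (ptp_rx) so timestamped traffic keeps
* arriving on uncompressed CQEs; profiles without PTP_RX support therefore
* cannot enable compression in that state.
*/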
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
return new_val ? -EOPNOTSUPP : 0;
if (curr_val == new_val)
return 0;
if (new_val && !mlx5e_profile_feature_cap(priv->profile, PTP_RX) && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
}
if (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
netdev_warn(priv->netdev, "Can't set CQE compression with HW-GRO, disable it first.\n");
return -EINVAL;
}
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (rx_filter)
new_params.ptp_rx = new_val;
if (new_params.ptp_rx == priv->channels.params.ptp_rx)
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
else
err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
&new_params.ptp_rx, true);
if (err)
return err;
netdev_dbg(priv->netdev, "MLX5E: RxCqeCmprss was turned %s\n",
MLX5E_GET_PFLAG(&priv->channels.params,
MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
return 0;
}
static int set_pflag_rx_cqe_compress(struct net_device *netdev,
bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool rx_filter;
int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
priv->channels.params.rx_cqe_compress_def = enable;
return 0;
}
static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
int err;
if (enable) {
/* Checking the regular RQ here; mlx5e_validate_xsk_param called
* from mlx5e_open_xsk will check for each XSK queue, and
* mlx5e_safe_switch_params will be reverted if any check fails.
*/
int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
if (err)
return err;
} else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
}
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable);
mlx5e_set_rq_type(mdev, &new_params);
err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
if (err)
return err;
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
return 0;
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *channels = &priv->channels;
struct mlx5e_channel *c;
int i;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
priv->channels.params.xdp_prog)
return 0;
for (i = 0; i < channels->num; i++) {
c = channels->c[i];
if (enable)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
else
__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
}
return 0;
}
static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
if (enable && !mlx5e_tx_mpwqe_supported(mdev))
return -EOPNOTSUPP;
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, flag, enable);
return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
}
static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
{
return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
}
static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
{
return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
}
static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
int err;
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn) ||
!MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
return -EOPNOTSUPP;
/* Don't allow changing the PTP state if HTB offload is active, because
* the numbering of the QoS SQs will change while per-queue qdiscs are
* attached.
*/
if (mlx5e_selq_is_htb_enabled(&priv->selq)) {
netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
__func__);
return -EINVAL;
}
new_params = priv->channels.params;
/* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is
* active, since it defines explicitly which TC accepts the packet.
* This conflicts with TX-port-TS hijacking the PTP traffic to a specific
* HW TX-queue.
*/
if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) {
netdev_err(priv->netdev,
"%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n",
__func__);
return -EINVAL;
}
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
* ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
* have the same log_sq_size.
*/
err = mlx5e_safe_switch_params(priv, &new_params,
mlx5e_num_channels_changed_ctx, NULL, true);
if (!err)
priv->tx_ptp_opened = true;
return err;
}
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
{ "rx_cqe_compress", set_pflag_rx_cqe_compress },
{ "rx_striding_rq", set_pflag_rx_striding_rq },
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
{ "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
{ "tx_port_ts", set_pflag_tx_port_ts },
};
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
bool enable = !!(wanted_flags & BIT(flag));
u32 changes = wanted_flags ^ priv->channels.params.pflags;
int err;
if (!(changes & BIT(flag)))
return 0;
err = mlx5e_priv_flags[flag].handler(netdev, enable);
if (err) {
netdev_err(netdev, "%s private flag '%s' failed err %d\n",
enable ? "Enable" : "Disable", mlx5e_priv_flags[flag].name, err);
return err;
}
MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
return 0;
}
static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
enum mlx5e_priv_flag pflag;
int err;
mutex_lock(&priv->state_lock);
for (pflag = 0; pflag < MLX5E_NUM_PFLAGS; pflag++) {
err = mlx5e_handle_pflag(netdev, pflags, pflag);
if (err)
break;
}
mutex_unlock(&priv->state_lock);
/* Need to fix some features.. */
netdev_update_features(netdev);
return err;
}
static u32 mlx5e_get_priv_flags(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return priv->channels.params.pflags;
}
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
/* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
* of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
* to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
* is compiled out via CONFIG_MLX5_EN_RXNFC=n.
*/
if (info->cmd == ETHTOOL_GRXRINGS) {
info->data = priv->channels.params.num_channels;
return 0;
}
return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
{
struct mlx5_ifc_pddr_troubleshooting_page_bits *pddr_troubleshooting_page;
u32 in[MLX5_ST_SZ_DW(pddr_reg)] = {};
u32 out[MLX5_ST_SZ_DW(pddr_reg)];
int err;
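/* Query the PDDR troubleshooting page (monitor group) for the local port;
* the returned status_opcode encodes why the link is down and is mapped
* to ethtool extended link states by the table below.
*/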
MLX5_SET(pddr_reg, in, local_port, 1);
MLX5_SET(pddr_reg, in, page_select,
MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE);
pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, in, page_data);
MLX5_SET(pddr_troubleshooting_page, pddr_troubleshooting_page,
group_opcode, MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR);
err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PDDR, 0, 0);
if (err)
return err;
pddr_troubleshooting_page = MLX5_ADDR_OF(pddr_reg, out, page_data);
*status_opcode = MLX5_GET(pddr_troubleshooting_page, pddr_troubleshooting_page,
status_opcode);
return 0;
}
struct mlx5e_ethtool_link_ext_state_opcode_mapping {
u32 status_opcode;
enum ethtool_link_ext_state link_ext_state;
u8 link_ext_substate;
};
static const struct mlx5e_ethtool_link_ext_state_opcode_mapping
mlx5e_link_ext_state_opcode_map[] = {
/* States relating to the autonegotiation or issues therein */
{2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED},
{3, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
{4, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED},
{36, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE},
{38, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE},
{39, ETHTOOL_LINK_EXT_STATE_AUTONEG,
ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
/* Failure during link training */
{5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED},
{6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
{7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
{8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0},
{14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
/* Logical mismatch in physical coding sublayer or forward error correction sublayer */
{9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
{10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK},
{11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS},
{12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
{13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
/* Signal integrity issues */
{15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0},
{17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
{42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE},
/* No cable connected */
{1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
/* Failure is related to cable, e.g., unsupported cable */
{16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
{20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
{29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
{1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
{1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
{1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0},
/* Failure is related to EEPROM, e.g., failure during reading or parsing the data */
{1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
/* Failure during calibration algorithm */
{23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0},
/* The hardware is not able to provide the power required by the cable or module */
{1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
/* The module is overheated */
{1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
};
static void
mlx5e_set_link_ext_state(struct mlx5e_ethtool_link_ext_state_opcode_mapping
link_ext_state_mapping,
struct ethtool_link_ext_state_info *link_ext_state_info)
{
switch (link_ext_state_mapping.link_ext_state) {
case ETHTOOL_LINK_EXT_STATE_AUTONEG:
link_ext_state_info->autoneg =
link_ext_state_mapping.link_ext_substate;
break;
case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE:
link_ext_state_info->link_training =
link_ext_state_mapping.link_ext_substate;
break;
case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH:
link_ext_state_info->link_logical_mismatch =
link_ext_state_mapping.link_ext_substate;
break;
case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:
link_ext_state_info->bad_signal_integrity =
link_ext_state_mapping.link_ext_substate;
break;
case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:
link_ext_state_info->cable_issue =
link_ext_state_mapping.link_ext_substate;
break;
default:
break;
}
link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state;
}
static int
mlx5e_get_link_ext_state(struct net_device *dev,
struct ethtool_link_ext_state_info *link_ext_state_info)
{
struct mlx5e_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping;
struct mlx5e_priv *priv = netdev_priv(dev);
u32 status_opcode = 0;
int i;
/* Exit without data if the interface state is OK, since no extended data is
* available in that case.
*/
if (netif_carrier_ok(dev))
return -ENODATA;
if (query_port_status_opcode(priv->mdev, &status_opcode) ||
!status_opcode)
return -ENODATA;
for (i = 0; i < ARRAY_SIZE(mlx5e_link_ext_state_opcode_map); i++) {
link_ext_state_mapping = mlx5e_link_ext_state_opcode_map[i];
if (link_ext_state_mapping.status_opcode == status_opcode) {
mlx5e_set_link_ext_state(link_ext_state_mapping,
link_ext_state_info);
return 0;
}
}
return -ENODATA;
}
static void mlx5e_get_eth_phy_stats(struct net_device *netdev,
struct ethtool_eth_phy_stats *phy_stats)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_eth_phy_get(priv, phy_stats);
}
static void mlx5e_get_eth_mac_stats(struct net_device *netdev,
struct ethtool_eth_mac_stats *mac_stats)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_eth_mac_get(priv, mac_stats);
}
static void mlx5e_get_eth_ctrl_stats(struct net_device *netdev,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_eth_ctrl_get(priv, ctrl_stats);
}
static void mlx5e_get_rmon_stats(struct net_device *netdev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
}
const struct ethtool_ops mlx5e_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_USE_CQE,
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ext_state = mlx5e_get_link_ext_state,
.get_strings = mlx5e_get_strings,
.get_sset_count = mlx5e_get_sset_count,
.get_ethtool_stats = mlx5e_get_ethtool_stats,
.get_ringparam = mlx5e_get_ringparam,
.set_ringparam = mlx5e_set_ringparam,
.get_channels = mlx5e_get_channels,
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
.get_link_ksettings = mlx5e_get_link_ksettings,
.set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxfh_context = mlx5e_get_rxfh_context,
.set_rxfh_context = mlx5e_set_rxfh_context,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pause_stats = mlx5e_get_pause_stats,
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
.set_phys_id = mlx5e_set_phys_id,
.get_wol = mlx5e_get_wol,
.set_wol = mlx5e_set_wol,
.get_module_info = mlx5e_get_module_info,
.get_module_eeprom = mlx5e_get_module_eeprom,
.get_module_eeprom_by_page = mlx5e_get_module_eeprom_by_page,
.flash_device = mlx5e_flash_device,
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
.get_fec_stats = mlx5e_get_fec_stats,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,
.get_eth_phy_stats = mlx5e_get_eth_phy_stats,
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
.get_link_ext_stats = mlx5e_get_link_ext_stats
};
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */
#include "fs_ft_pool.h"
/* Firmware currently supports a fixed set of flow table pool sizes (FT_POOLS)
* and a virtual memory region of 16M (FT_SIZE); this region is duplicated
* for each flow table pool. We can allocate up to 16M from each pool, and
* we keep track of how much of each pool has been handed out via
* mlx5_ft_pool_get_avail_sz()/mlx5_ft_pool_put_sz().
* Firmware doesn't report any of this for now.
* FT_POOLS is expected to be sorted from large to small and match the
* firmware pools.
*/
#define FT_SIZE (16 * 1024 * 1024)
static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
128,
1 /* size for termination tables */ };
struct mlx5_ft_pool {
int ft_left[ARRAY_SIZE(FT_POOLS)];
};
int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
{
struct mlx5_ft_pool *ft_pool;
int i;
ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
if (!ft_pool)
return -ENOMEM;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
dev->priv.ft_pool = ft_pool;
return 0;
}
void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
{
kfree(dev->priv.ft_pool);
}
int
mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
int desired_size)
{
u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
int i, found_i = -1;
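/* Scan the pools from the smallest size to the largest and take the
* smallest pool that still has entries left, is at least desired_size and
* does not exceed the device's max FT size. POOL_NEXT_SIZE keeps scanning,
* so the largest suitable pool is returned instead.
*/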
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
FT_POOLS[i] <= max_ft_size) {
found_i = i;
if (desired_size != POOL_NEXT_SIZE)
break;
}
}
if (found_i != -1) {
--dev->priv.ft_pool->ft_left[found_i];
return FT_POOLS[found_i];
}
return 0;
}
void
mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
{
int i;
if (!sz)
return;
for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
if (sz == FT_POOLS[i]) {
++dev->priv.ft_pool->ft_left[i];
return;
}
}
WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
|
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies */
#include <linux/mlx5/vport.h>
#include <rdma/ib_verbs.h>
#include <net/addrconf.h>
#include "lib/mlx5.h"
#include "eswitch.h"
#include "fs_core.h"
#include "rdma.h"
static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
mlx5_del_flow_rules(roce->allow_rule);
mlx5_destroy_flow_group(roce->fg);
mlx5_destroy_flow_table(roce->ft);
}
static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_roce *roce = &dev->priv.roce;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct mlx5_eswitch *esw;
u32 *flow_group_in;
int err;
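/* Build a single-entry flow table in the RDMA RX kernel namespace with one
* flow group and one ALLOW rule matching the eswitch manager vport as the
* source port, so RoCE traffic from the manager vport is let through.
*/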
if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
return -EOPNOTSUPP;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
kvfree(flow_group_in);
return -ENOMEM;
}
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
goto free;
}
ft_attr.max_fte = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create RDMA RX flow table");
err = PTR_ERR(ft);
goto free;
}
esw = dev->priv.eswitch;
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
fg = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(fg)) {
err = PTR_ERR(fg);
mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
goto destroy_flow_table;
}
mlx5_esw_set_spec_source_port(esw, esw->manager_vport, spec);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
err);
goto destroy_flow_group;
}
kvfree(spec);
kvfree(flow_group_in);
roce->ft = ft;
roce->fg = fg;
roce->allow_rule = flow_rule;
return 0;
destroy_flow_group:
mlx5_destroy_flow_group(fg);
destroy_flow_table:
mlx5_destroy_flow_table(ft);
free:
kvfree(spec);
kvfree(flow_group_in);
return err;
}
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, MLX5_ROCE_VERSION_2, 0,
NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
{
u8 hw_id[ETH_ALEN];
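/* Build the default RoCE GID: the IPv6 link-local prefix fe80::/64 plus an
* interface identifier derived from the port MAC address via
* addrconf_addr_eui48().
*/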
mlx5_query_mac_address(dev, hw_id);
gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
addrconf_addr_eui48(&gid->raw[8], hw_id);
}
static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
{
union ib_gid gid;
u8 mac[ETH_ALEN];
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
MLX5_ROCE_VERSION_2,
0, gid.raw, mac,
false, 0, 1);
}
void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
{
struct mlx5_core_roce *roce = &dev->priv.roce;
if (!roce->ft)
return;
mlx5_rdma_disable_roce_steering(dev);
mlx5_rdma_del_roce_addr(dev);
mlx5_nic_vport_disable_roce(dev);
}
void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
{
int err;
if (!MLX5_CAP_GEN(dev, roce))
return;
err = mlx5_nic_vport_enable_roce(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
return;
}
err = mlx5_rdma_add_roce_addr(dev);
if (err) {
mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
goto disable_roce;
}
err = mlx5_rdma_enable_roce_steering(dev);
if (err) {
mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
goto del_roce_addr;
}
return;
del_roce_addr:
mlx5_rdma_del_roce_addr(dev);
disable_roce:
mlx5_nic_vport_disable_roce(dev);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
|
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/irq.h>
#include <net/xdp_sock_drv.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
int current_cpu = smp_processor_id();
return cpumask_test_cpu(current_cpu, c->aff_mask);
}
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_SQ_STATE_DIM, &sq->state)))
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_RQ_STATE_DIM, &rq->state)))
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
net_dim(&rq->dim, dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_tx_wqe *nopwqe;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
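/* Post a NOP WQE on the ICOSQ and ring its doorbell; the completion raises
* an event that reschedules NAPI on this channel (see
* mlx5e_completion_event()).
*/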
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_NOP,
.num_wqebbs = 1,
};
nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);
bool busy_xsk = false, xsk_rx_alloc_err;
/* If SQ is empty, there are no TX completions to trigger NAPI, so set
* need_wakeup. Do it before queuing packets for TX to avoid a race
* condition with userspace.
*/
if (need_wakeup && xsksq->pc == xsksq->cc)
xsk_set_tx_need_wakeup(xsksq->xsk_pool);
busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
/* If we queued some packets for TX, no need for wakeup anymore. */
if (need_wakeup && xsksq->pc != xsksq->cc)
xsk_clear_tx_need_wakeup(xsksq->xsk_pool);
/* If WQ is empty, RX won't trigger NAPI, so set need_wakeup. Do it
* before refilling to avoid a race condition with userspace.
*/
if (need_wakeup && !mlx5e_rqwq_get_cur_sz(xskrq))
xsk_set_rx_need_wakeup(xskrq->xsk_pool);
xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
xskrq);
/* Ask for wakeup if WQ is not full after refill. */
if (!need_wakeup)
busy_xsk |= xsk_rx_alloc_err;
else if (xsk_rx_alloc_err)
xsk_set_rx_need_wakeup(xskrq->xsk_pool);
else
xsk_clear_rx_need_wakeup(xskrq->xsk_pool);
return busy_xsk;
}
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
u16 qos_sqs_size;
bool xsk_open;
int i;
rcu_read_lock();
qos_sqs = rcu_dereference(c->qos_sqs);
xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
ch_stats->poll++;
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
if (unlikely(qos_sqs)) {
smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
qos_sqs_size = READ_ONCE(c->qos_sqs_size);
for (i = 0; i < qos_sqs_size; i++) {
struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
if (sq)
busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
}
}
/* budget=0 means we may be in IRQ context, do as little as possible */
if (unlikely(!budget))
goto out;
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
if (xsk_open)
work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
if (likely(budget - work_done))
work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
busy |= work_done == budget;
mlx5e_poll_ico_cq(&c->icosq.cq);
if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
/* Don't clear the flag if nothing was polled to prevent
* queueing more WQEs and overflowing the async ICOSQ.
*/
clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
/* Keep after async ICOSQ CQ poll */
if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);
busy |= INDIRECT_CALL_2(rq->post_wqes,
mlx5e_post_rx_mpwqes,
mlx5e_post_rx_wqes,
rq);
if (xsk_open) {
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
}
busy |= busy_xsk;
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c))) {
work_done = budget;
goto out;
}
ch_stats->aff_change++;
aff_change = true;
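/* IRQ affinity changed: complete NAPI even though there is still work so
* the poll can be rescheduled on the new CPU. Report less than the full
* budget, since napi_complete_done() must not be called with the whole
* budget consumed.
*/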
if (work_done == budget)
work_done--;
}
if (unlikely(!napi_complete_done(napi, work_done)))
goto out;
ch_stats->arm++;
for (i = 0; i < c->num_tc; i++) {
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
}
if (unlikely(qos_sqs)) {
for (i = 0; i < qos_sqs_size; i++) {
struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
if (sq) {
mlx5e_handle_tx_dim(sq);
mlx5e_cq_arm(&sq->cq);
}
}
}
mlx5e_handle_rx_dim(rq);
mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->async_icosq.cq);
mlx5e_cq_arm(&c->xdpsq.cq);
if (xsk_open) {
mlx5e_handle_rx_dim(xskrq);
mlx5e_cq_arm(&xsksq->cq);
mlx5e_cq_arm(&xskrq->cq);
}
if (unlikely(aff_change && busy_xsk)) {
mlx5e_trigger_irq(&c->icosq);
ch_stats->force_irq++;
}
out:
rcu_read_unlock();
return work_done;
}
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
napi_schedule(cq->napi);
cq->event_ctr++;
cq->ch_stats->events++;
}
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
struct net_device *netdev = cq->netdev;
netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
__func__, mcq->cqn, event);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
|
/*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect)
{
return 0;
}
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int max_fte = ft_attr->max_fte;
ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
return 0;
}
static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
return 0;
}
static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
return 0;
}
static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg)
{
return 0;
}
static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
return 0;
}
static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
return 0;
}
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return 0;
}
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
return 0;
}
static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
}
static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
u8 namespace, u8 num_actions,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
return 0;
}
static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
}
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns,
u16 peer_vhca_id)
{
return 0;
}
static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
return 0;
}
static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
return 0;
}
static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
return 0;
}
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave,
bool ft_id_valid,
u32 ft_id)
{
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_namespace *ns;
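/* When ft_id_valid, point the slave's FDB root at a table owned by the
* master (identified by the master's vhca_id); otherwise reset it back to
* the slave's own root FDB table.
*/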
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type,
FS_FT_FDB);
if (ft_id_valid) {
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id,
MLX5_CAP_GEN(master, vhca_id));
MLX5_SET(set_flow_table_root_in, in, table_id,
ft_id);
} else {
ns = mlx5_get_flow_namespace(slave,
MLX5_FLOW_NAMESPACE_FDB);
root = find_root(&ns->node);
MLX5_SET(set_flow_table_root_in, in, table_id,
root->root_ft->id);
}
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
int definer_id)
{
return 0;
}
static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
return 0;
}
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
underlay_qpn == 0)
return 0;
if (ft->type == FS_FT_FDB &&
mlx5_lag_is_shared_fdb(dev) &&
!mlx5_lag_is_master(dev))
return 0;
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
if (disconnect)
MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
else
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
MLX5_SET(set_flow_table_root_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
if (!err &&
ft->type == FS_FT_FDB &&
mlx5_lag_is_shared_fdb(dev) &&
mlx5_lag_is_master(dev)) {
struct mlx5_core_dev *peer_dev;
int i, j;
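/* On a shared-FDB LAG master, mirror the root table change to every peer
* device. If any peer fails while connecting, roll back the peers that were
* already updated and restore the local root table as well.
*/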
mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
(!disconnect) ? ft->id : 0);
if (err && !disconnect) {
mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
if (j < i)
mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
ns->root_ft->id);
else
break;
}
MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
MLX5_SET(set_flow_table_root_in, in, table_id,
ns->root_ft->id);
mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
if (err)
break;
}
}
return err;
}
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table_attr *ft_attr,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
unsigned int size;
int err;
size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
term);
switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_id, next_ft->id);
} else {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
ft->def_miss_action);
}
break;
case FS_FT_OP_MOD_LAG_DEMUX:
MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
if (next_ft)
MLX5_SET(create_flow_table_in, in,
flow_table_context.lag_master_next_table_id,
next_ft->id);
break;
}
err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
if (!err) {
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
ft->max_fte = size;
} else {
mlx5_ft_pool_put_sz(ns->dev, size);
}
return err;
}
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
if (!err)
mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
return err;
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.lag_master_next_table_id, next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.lag_master_next_table_id, 0);
}
} else {
MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(modify_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action,
MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_id,
next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action,
ft->def_miss_action);
}
}
return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
group_id);
return err;
}
static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
struct fs_fte *fte, bool *extended_dest)
{
int fw_log_max_fdb_encap_uplink =
MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
int num_fwd_destinations = 0;
struct mlx5_flow_rule *dst;
int num_encap = 0;
*extended_dest = false;
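/* The extended destination format is needed when an FTE forwards to more
* than one destination and at least one of them carries a packet reformat
* (encap) ID; check that the FW supports it and that the number of encap
* destinations fits within the FW limit.
*/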
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
continue;
if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
num_encap++;
num_fwd_destinations++;
}
if (num_fwd_destinations > 1 && num_encap > 0)
*extended_dest = true;
if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
mlx5_core_warn(dev, "FW does not support extended destination");
return -EOPNOTSUPP;
}
if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
mlx5_core_warn(dev, "FW does not support more than %d encaps",
1 << fw_log_max_fdb_encap_uplink);
return -EOPNOTSUPP;
}
return 0;
}
static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
void *exe_aso_ctrl;
void *execute_aso;
execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
fte->action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
fte->action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
fte->action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
fte->action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
fte->action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
bool extended_dest = false;
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
int reformat_id = 0;
unsigned int inlen;
int dst_cnt_size;
u32 *in, action;
void *in_dests;
int err;
if (mlx5_set_extended_dest(dev, fte, &extended_dest))
return -EOPNOTSUPP;
if (!extended_dest)
dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
fte->flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
action = fte->action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
if (reformat_id < 0) {
mlx5_core_err(dev,
"Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
pkt_reformat->reformat_type);
err = reformat_id;
goto err_out;
}
} else {
reformat_id = fte->action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
if (fte->action.modify_hdr) {
if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
fte->action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
fte->action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
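/* Forwarding destinations and flow counters share the destination array:
* destination entries are written first and counter entries are appended
* after them, each slot dst_cnt_size bytes wide.
*/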
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
enum mlx5_flow_destination_type type = dst->dest_attr.type;
enum mlx5_ifc_flow_destination_type ifc_type;
unsigned int id;
if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
switch (type) {
case MLX5_FLOW_DESTINATION_TYPE_NONE:
continue;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
id = dst->dest_attr.ft_num;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
break;
case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
MLX5_FLOW_DEST_VPORT_VHCA_ID));
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
/* destination_id is reserved */
id = 0;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
break;
}
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
packet_reformat,
!!(dst->dest_attr.vport.flags &
MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
MLX5_SET(extended_dest_format, in_dests,
packet_reformat_id,
dst->dest_attr.vport.pkt_reformat->id);
}
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
id = dst->dest_attr.sampler_id;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
MLX5_SET(dest_format_struct, in_dests,
destination_table_type, dst->dest_attr.ft->type);
id = dst->dest_attr.ft->id;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
break;
default:
id = dst->dest_attr.tir_num;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
}
MLX5_SET(dest_format_struct, in_dests, destination_type,
ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += dst_cnt_size;
list_size++;
}
MLX5_SET(flow_context, in_flow_context, destination_list_size,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
if (dst->dest_attr.type !=
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
dst->dest_attr.counter_id);
in_dests += dst_cnt_size;
list_size++;
}
if (list_size > max_list_size) {
err = -EINVAL;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
goto err_out;
}
}
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
kvfree(in);
return err;
}
static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
struct mlx5_core_dev *dev = ns->dev;
unsigned int group_id = group->id;
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
int opmod;
struct mlx5_core_dev *dev = ns->dev;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -EOPNOTSUPP;
opmod = 1;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}
static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, fte->index);
MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
MLX5_SET(delete_fte_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
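/*
 * HW flow counter helpers: allocate a single counter or a bulk (as described
 * by alloc_bitmask), free it, and query packet/byte statistics. Bulk queries
 * return one traffic_counter entry per counter, starting at base_id.
 */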
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
int err;
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
if (!err)
*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
MLX5_SET(dealloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes)
{
u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter)] = {};
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
void *stats;
int err = 0;
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
return 0;
}
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
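/*
 * Allocate a packet reformat (encap/decap) context in FW. The maximum encap
 * header size comes from the eswitch caps for FDB namespaces and from the
 * NIC flow table caps otherwise.
 */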
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *packet_reformat_context_in;
int max_encap_size;
void *reformat;
int inlen;
int err;
u32 *in;
if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
if (params->size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
params->size, max_encap_size);
return -EINVAL;
}
in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
params->size, GFP_KERNEL);
if (!in)
return -ENOMEM;
packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
in, packet_reformat_context);
reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
inlen = reformat - (void *)in + params->size;
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_data_size, params->size);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_type, params->type);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_param_0, params->param_0);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
reformat_param_1, params->param_1);
if (params->data && params->size)
memcpy(reformat, params->data, params->size);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
out, packet_reformat_id);
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
kfree(in);
return err;
}
static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_pkt_reformat *pkt_reformat)
{
u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
pkt_reformat->id);
mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}
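/*
 * Allocate a modify-header context. The namespace selects both the flow
 * table type programmed into the command and the capability that bounds the
 * number of set/add/copy actions accepted in one context.
 */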
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
u8 namespace, u8 num_actions,
void *modify_actions,
struct mlx5_modify_hdr *modify_hdr)
{
u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
int max_actions, actions_size, inlen, err;
struct mlx5_core_dev *dev = ns->dev;
void *actions_in;
u8 table_type;
u32 *in;
switch (namespace) {
case MLX5_FLOW_NAMESPACE_FDB:
case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
table_type = FS_FT_FDB;
break;
case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_BYPASS:
max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
table_type = FS_FT_ESW_INGRESS_ACL;
break;
case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
case MLX5_FLOW_NAMESPACE_RDMA_TX:
max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
table_type = FS_FT_RDMA_TX;
break;
default:
return -EOPNOTSUPP;
}
if (num_actions > max_actions) {
mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
num_actions, max_actions);
return -EOPNOTSUPP;
}
actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
in = kzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(alloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
memcpy(actions_in, modify_actions, actions_size);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
kfree(in);
return err;
}
static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
struct mlx5_modify_hdr *modify_hdr)
{
u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(dealloc_modify_header_context_in, in, opcode,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
modify_hdr->id);
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
int definer_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_OBJ_TYPE_MATCH_DEFINER);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
u16 format_id, u32 *match_mask)
{
u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
void *ptr;
int err;
MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
MLX5_OBJ_TYPE_MATCH_DEFINER);
ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
MLX5_SET(match_definer, ptr, format_id, format_id);
ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
return 0;
}
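/*
 * Two command dispatch tables: mlx5_flow_cmds maps flow-steering operations
 * onto FW commands, while mlx5_flow_cmd_stubs is the no-op fallback returned
 * by mlx5_fs_cmd_get_default() for table types not handled here.
 */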
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
.modify_flow_table = mlx5_cmd_modify_flow_table,
.create_flow_group = mlx5_cmd_create_flow_group,
.destroy_flow_group = mlx5_cmd_destroy_flow_group,
.create_fte = mlx5_cmd_create_fte,
.update_fte = mlx5_cmd_update_fte,
.delete_fte = mlx5_cmd_delete_fte,
.update_root_ft = mlx5_cmd_update_root_ft,
.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
.create_match_definer = mlx5_cmd_create_match_definer,
.destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
.get_capabilities = mlx5_cmd_get_capabilities,
};
static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.create_flow_table = mlx5_cmd_stub_create_flow_table,
.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
.create_flow_group = mlx5_cmd_stub_create_flow_group,
.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
.create_fte = mlx5_cmd_stub_create_fte,
.update_fte = mlx5_cmd_stub_update_fte,
.delete_fte = mlx5_cmd_stub_delete_fte,
.update_root_ft = mlx5_cmd_stub_update_root_ft,
.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
.create_match_definer = mlx5_cmd_stub_create_match_definer,
.destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
.get_capabilities = mlx5_cmd_stub_get_capabilities,
};
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
return &mlx5_flow_cmds;
}
static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
return &mlx5_flow_cmd_stubs;
}
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
switch (type) {
case FS_FT_NIC_RX:
case FS_FT_ESW_EGRESS_ACL:
case FS_FT_ESW_INGRESS_ACL:
case FS_FT_FDB:
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
}
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c |
/*
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include <linux/mlx5/transobj.h>
int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
{
u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
int err;
MLX5_SET(alloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
if (!err)
*tdn = MLX5_GET(alloc_transport_domain_out, out,
transport_domain);
return err;
}
EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
MLX5_SET(dealloc_transport_domain_in, in, opcode,
MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
}
EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
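/*
 * Transport object helpers (RQ/SQ/TIR/TIS/RQT) share one pattern: the caller
 * prepares the "in" mailbox with MLX5_SET()/MLX5_ADDR_OF(), the helper stamps
 * the opcode, executes the command and returns the new object number.
 *
 * Illustrative sketch only (not part of this file; mdev, tdn, err and tisn
 * are assumed to exist in the caller):
 *
 *	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
 *	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
 *
 *	MLX5_SET(tisc, tisc, transport_domain, tdn);
 *	err = mlx5_core_create_tis(mdev, in, &tisn);
 */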
int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
{
u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {};
int err;
MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqn = MLX5_GET(create_rq_out, out, rqn);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in)
{
MLX5_SET(modify_rq_in, in, rqn, rqn);
MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
return mlx5_cmd_exec_in(dev, modify_rq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_rq);
void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
MLX5_SET(destroy_rq_in, in, rqn, rqn);
mlx5_cmd_exec_in(dev, destroy_rq, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {};
MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
MLX5_SET(query_rq_in, in, rqn, rqn);
return mlx5_cmd_exec_inout(dev, query_rq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_rq);
int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
{
u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
int err;
MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*sqn = MLX5_GET(create_sq_out, out, sqn);
return err;
}
int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in)
{
MLX5_SET(modify_sq_in, in, sqn, sqn);
MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
return mlx5_cmd_exec_in(dev, modify_sq, in);
}
EXPORT_SYMBOL(mlx5_core_modify_sq);
void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
{
u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
MLX5_SET(destroy_sq_in, in, sqn, sqn);
mlx5_cmd_exec_in(dev, destroy_sq, in);
}
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
{
u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {};
MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
MLX5_SET(query_sq_in, in, sqn, sqn);
return mlx5_cmd_exec_inout(dev, query_sq, in, out);
}
EXPORT_SYMBOL(mlx5_core_query_sq);
int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state)
{
void *out;
void *sqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(query_sq_out);
out = kvzalloc(inlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
err = mlx5_core_query_sq(dev, sqn, out);
if (err)
goto out;
sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context);
*state = MLX5_GET(sqc, sqc, state);
out:
kvfree(out);
return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn)
{
u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
err = mlx5_cmd_exec_inout(dev, create_tir, in, out);
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_tir);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in)
{
MLX5_SET(modify_tir_in, in, tirn, tirn);
MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
return mlx5_cmd_exec_in(dev, modify_tir, in);
}
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
MLX5_SET(destroy_tir_in, in, tirn, tirn);
mlx5_cmd_exec_in(dev, destroy_tir, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tir);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn)
{
u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
int err;
MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
err = mlx5_cmd_exec_inout(dev, create_tis, in, out);
if (!err)
*tisn = MLX5_GET(create_tis_out, out, tisn);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_tis);
int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in)
{
MLX5_SET(modify_tis_in, in, tisn, tisn);
MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
return mlx5_cmd_exec_in(dev, modify_tis, in);
}
EXPORT_SYMBOL(mlx5_core_modify_tis);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
{
u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
MLX5_SET(destroy_tis_in, in, tisn, tisn);
mlx5_cmd_exec_in(dev, destroy_tis, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
int err;
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
return err;
}
EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
{
u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {};
MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
{
u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
mlx5_cmd_exec_in(dev, destroy_rqt, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_rqt);
static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
struct mlx5_hairpin_params *params, u32 *rqn)
{
u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0};
void *rqc, *wq;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(rqc, rqc, hairpin, 1);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);
MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
}
static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
struct mlx5_hairpin_params *params, u32 *sqn)
{
u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
void *sqc, *wq;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(sqc, sqc, hairpin, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);
return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
}
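/*
 * Create num_channels hairpin RQs on the function device and matching SQs on
 * the peer device; on failure every queue created so far is destroyed.
 */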
static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
struct mlx5_hairpin_params *params)
{
int i, j, err;
for (i = 0; i < hp->num_channels; i++) {
err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
if (err)
goto out_err_rq;
}
for (i = 0; i < hp->num_channels; i++) {
err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
if (err)
goto out_err_sq;
}
return 0;
out_err_sq:
for (j = 0; j < i; j++)
mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
i = hp->num_channels;
out_err_rq:
for (j = 0; j < i; j++)
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
return err;
}
static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
{
int i;
for (i = 0; i < hp->num_channels; i++) {
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
if (!hp->peer_gone)
mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
}
}
static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_sq)
{
u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
void *rqc;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
if (next_state == MLX5_RQC_STATE_RDY) {
MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq);
MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca);
}
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
return mlx5_core_modify_rq(func_mdev, rqn, in);
}
static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
int curr_state, int next_state,
u16 peer_vhca, u32 peer_rq)
{
u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0};
void *sqc;
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
if (next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
}
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
return mlx5_core_modify_sq(peer_mdev, sqn, in);
}
static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
{
int i, j, err;
/* set peer SQs */
for (i = 0; i < hp->num_channels; i++) {
err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
if (err)
goto err_modify_sq;
}
/* set func RQs */
for (i = 0; i < hp->num_channels; i++) {
err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
if (err)
goto err_modify_rq;
}
return 0;
err_modify_rq:
for (j = 0; j < i; j++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
i = hp->num_channels;
err_modify_sq:
for (j = 0; j < i; j++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
return err;
}
static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
{
int i;
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
}
static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
{
int i;
/* unset func RQs */
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQs */
if (!hp->peer_gone)
mlx5_hairpin_unpair_peer_sq(hp);
}
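/*
 * mlx5_core_hairpin_create() allocates one struct mlx5_hairpin with the
 * rqn[] and sqn[] arrays appended to it, creates the queues on both devices
 * and moves them RST->RDY, cross-wiring each RQ with its peer SQ and each SQ
 * with its peer RQ. mlx5_core_hairpin_destroy() undoes both steps.
 */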
struct mlx5_hairpin *
mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_core_dev *peer_mdev,
struct mlx5_hairpin_params *params)
{
struct mlx5_hairpin *hp;
int size, err;
size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
hp = kzalloc(size, GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
hp->func_mdev = func_mdev;
hp->peer_mdev = peer_mdev;
hp->num_channels = params->num_channels;
hp->rqn = (void *)hp + sizeof(*hp);
hp->sqn = hp->rqn + params->num_channels;
/* alloc and pair func --> peer hairpin */
err = mlx5_hairpin_create_queues(hp, params);
if (err)
goto err_create_queues;
err = mlx5_hairpin_pair_queues(hp);
if (err)
goto err_pair_queues;
return hp;
err_pair_queues:
mlx5_hairpin_destroy_queues(hp);
err_create_queues:
kfree(hp);
return ERR_PTR(err);
}
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
{
mlx5_hairpin_unpair_queues(hp);
mlx5_hairpin_destroy_queues(hp);
kfree(hp);
}
void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
{
int i;
mlx5_hairpin_unpair_peer_sq(hp);
/* destroy peer SQ */
for (i = 0; i < hp->num_channels; i++)
mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
hp->peer_gone = true;
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/transobj.c |
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"
enum {
QP_PID,
QP_STATE,
QP_XPORT,
QP_MTU,
QP_N_RECV,
QP_RECV_SZ,
QP_N_SEND,
QP_LOG_PG_SZ,
QP_RQPN,
};
static char *qp_fields[] = {
[QP_PID] = "pid",
[QP_STATE] = "state",
[QP_XPORT] = "transport",
[QP_MTU] = "mtu",
[QP_N_RECV] = "num_recv",
[QP_RECV_SZ] = "rcv_wqe_sz",
[QP_N_SEND] = "num_send",
[QP_LOG_PG_SZ] = "log2_page_sz",
[QP_RQPN] = "remote_qpn",
};
enum {
EQ_NUM_EQES,
EQ_INTR,
EQ_LOG_PG_SZ,
};
static char *eq_fields[] = {
[EQ_NUM_EQES] = "num_eqes",
[EQ_INTR] = "intr",
[EQ_LOG_PG_SZ] = "log_page_size",
};
enum {
CQ_PID,
CQ_NUM_CQES,
CQ_LOG_PG_SZ,
};
static char *cq_fields[] = {
[CQ_PID] = "pid",
[CQ_NUM_CQES] = "num_cqes",
[CQ_LOG_PG_SZ] = "log_page_size",
};
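/*
 * Global debugfs root, typically /sys/kernel/debug/mlx5; the per-device
 * QPs/EQs/CQs/commands/pages directories are created under
 * dev->priv.dbg.dbg_root.
 */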
struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);
void mlx5_register_debugfs(void)
{
mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}
void mlx5_unregister_debugfs(void)
{
debugfs_remove(mlx5_debugfs_root);
}
struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_cmd_stats *stats;
u64 field = 0;
int ret;
char tbuf[22];
stats = filp->private_data;
spin_lock_irq(&stats->lock);
if (stats->n)
field = div64_u64(stats->sum, stats->n);
spin_unlock_irq(&stats->lock);
ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static ssize_t average_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
stats = filp->private_data;
spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
spin_unlock_irq(&stats->lock);
*pos += count;
return count;
}
static const struct file_operations stats_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
.write = average_write,
};
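/* "slots_inuse": how many command-interface slots are currently occupied. */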
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_cmd *cmd;
char tbuf[6];
int weight;
int field;
int ret;
cmd = filp->private_data;
weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
field = cmd->vars.max_reg_cmds - weight;
ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static const struct file_operations slots_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = slots_read,
};
static struct mlx5_cmd_stats *
mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
{
struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
int err;
if (!stats)
return NULL;
err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
if (err) {
kfree(stats);
return NULL;
}
spin_lock_init(&stats->lock);
return stats;
}
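/*
 * Create the "commands" debugfs directory with one sub-directory per known
 * command opcode exposing its statistics (average, n, failure counters and
 * last-failure details).
 */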
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
struct dentry **cmd;
const char *namep;
int i;
cmd = &dev->priv.dbg.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
xa_init(&dev->cmd.stats);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
if (!stats)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
debugfs_create_u64("failed_mbox_status", 0400, stats->root,
&stats->failed_mbox_status);
debugfs_create_u32("last_failed_errno", 0400, stats->root,
&stats->last_failed_errno);
debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
&stats->last_failed_mbox_status);
debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
&stats->last_failed_syndrome);
}
}
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
unsigned long i;
debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
xa_for_each(&dev->cmd.stats, i, stats)
kfree(stats);
xa_destroy(&dev->cmd.stats);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
struct dentry *pages;
dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
debugfs_create_u32("fw_pages_ec_vfs", 0400, pages, &dev->priv.page_counters[MLX5_EC_VF]);
debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
&dev->priv.reclaim_pages_discard);
}
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
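/*
 * Field readers for the per-resource debugfs files: each queries the object
 * from FW (or uses locally cached values such as the pid) and returns the
 * requested field.
 */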
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
int index, int *is_str)
{
int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
u64 param = 0;
u32 *out;
int state;
u32 *qpc;
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return 0;
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
if (err)
goto out;
*is_str = 0;
qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
switch (index) {
case QP_PID:
param = qp->pid;
break;
case QP_STATE:
state = MLX5_GET(qpc, qpc, state);
param = (unsigned long)mlx5_qp_state_str(state);
*is_str = 1;
break;
case QP_XPORT:
param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
*is_str = 1;
break;
case QP_MTU:
switch (MLX5_GET(qpc, qpc, mtu)) {
case IB_MTU_256:
param = 256;
break;
case IB_MTU_512:
param = 512;
break;
case IB_MTU_1024:
param = 1024;
break;
case IB_MTU_2048:
param = 2048;
break;
case IB_MTU_4096:
param = 4096;
break;
default:
param = 0;
}
break;
case QP_N_RECV:
param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
break;
case QP_RECV_SZ:
param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
break;
case QP_N_SEND:
if (!MLX5_GET(qpc, qpc, no_sq))
param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
break;
case QP_LOG_PG_SZ:
param = MLX5_GET(qpc, qpc, log_page_size) + 12;
break;
case QP_RQPN:
param = MLX5_GET(qpc, qpc, remote_qpn);
break;
}
out:
kfree(out);
return param;
}
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
u64 param = 0;
void *ctx;
u32 *out;
int err;
out = kzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
if (err) {
mlx5_core_warn(dev, "failed to query eq\n");
goto out;
}
ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
switch (index) {
case EQ_NUM_EQES:
param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
break;
case EQ_INTR:
param = MLX5_GET(eqc, ctx, intr);
break;
case EQ_LOG_PG_SZ:
param = MLX5_GET(eqc, ctx, log_page_size) + 12;
break;
}
out:
kfree(out);
return param;
}
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
int index)
{
int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
u64 param = 0;
void *ctx;
u32 *out;
int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return param;
err = mlx5_core_query_cq(dev, cq, out);
if (err) {
mlx5_core_warn(dev, "failed to query cq\n");
goto out;
}
ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
switch (index) {
case CQ_PID:
param = cq->pid;
break;
case CQ_NUM_CQES:
param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
break;
case CQ_LOG_PG_SZ:
param = MLX5_GET(cqc, ctx, log_page_size);
break;
}
out:
kvfree(out);
return param;
}
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
loff_t *pos)
{
struct mlx5_field_desc *desc;
struct mlx5_rsc_debug *d;
char tbuf[18];
int is_str = 0;
u64 field;
int ret;
desc = filp->private_data;
d = (void *)(desc - desc->i) - sizeof(*d);
switch (d->type) {
case MLX5_DBG_RSC_QP:
field = qp_read_field(d->dev, d->object, desc->i, &is_str);
break;
case MLX5_DBG_RSC_EQ:
field = eq_read_field(d->dev, d->object, desc->i);
break;
case MLX5_DBG_RSC_CQ:
field = cq_read_field(d->dev, d->object, desc->i);
break;
default:
mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
return -EINVAL;
}
if (is_str)
ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
else
ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = dbg_read,
};
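/*
 * add_res_tree() creates a directory named after the resource number
 * (e.g. "0x1a") with one file per field; dbg_read() recovers the enclosing
 * struct mlx5_rsc_debug from the field descriptor by pointer arithmetic.
 */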
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
struct dentry *root, struct mlx5_rsc_debug **dbg,
int rsn, char **field, int nfile, void *data)
{
struct mlx5_rsc_debug *d;
char resn[32];
int i;
d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
if (!d)
return -ENOMEM;
d->dev = dev;
d->object = data;
d->type = type;
sprintf(resn, "0x%x", rsn);
d->root = debugfs_create_dir(resn, root);
for (i = 0; i < nfile; i++) {
d->fields[i].i = i;
debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
&fops);
}
*dbg = d;
return 0;
}
static void rem_res_tree(struct mlx5_rsc_debug *d)
{
debugfs_remove_recursive(d->root);
kfree(d);
}
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
qp->dbg = NULL;
return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
if (!mlx5_debugfs_root || !qp->dbg)
return;
rem_res_tree(qp->dbg);
qp->dbg = NULL;
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
eq->dbg = NULL;
return err;
}
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
if (!mlx5_debugfs_root)
return;
if (eq->dbg)
rem_res_tree(eq->dbg);
}
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
int err;
if (!mlx5_debugfs_root)
return 0;
err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
cq->dbg = NULL;
return err;
}
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
if (!mlx5_debugfs_root)
return;
if (cq->dbg) {
rem_res_tree(cq->dbg);
cq->dbg = NULL;
}
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/debugfs.c |
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
MLX5_PAGES_GIVE = 1,
MLX5_PAGES_TAKE = 2
};
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u16 func_id;
u8 ec_function;
s32 npages;
struct work_struct work;
u8 release_all;
};
struct fw_page {
struct rb_node rb_node;
u64 addr;
struct page *page;
u32 function;
unsigned long bitmask;
struct list_head list;
unsigned int free_count;
};
enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
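/*
 * FW pages are tracked per "function": get_function() packs the 16-bit
 * function id into the low bits and the embedded-CPU flag into bit 16, and
 * that value keys both the free list entries and the page_root_xa rb-trees.
 */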
static u32 get_function(u16 func_id, bool ec_function)
{
return (u32)func_id | (ec_function << 16);
}
static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
if (!func_id)
return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
if (func_id <= max(mlx5_core_max_vfs(dev), mlx5_core_max_ec_vfs(dev))) {
if (ec_function)
return MLX5_EC_VF;
else
return MLX5_VF;
}
return MLX5_SF;
}
static u32 mlx5_get_ec_function(u32 function)
{
return function >> 16;
}
static u32 mlx5_get_func_id(u32 function)
{
return function & 0xffff;
}
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
int err;
root = xa_load(&dev->priv.page_root_xa, function);
if (root)
return root;
root = kzalloc(sizeof(*root), GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
if (err) {
kfree(root);
return ERR_PTR(err);
}
*root = RB_ROOT;
return root;
}
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
struct rb_node *parent = NULL;
struct rb_root *root;
struct rb_node **new;
struct fw_page *nfp;
struct fw_page *tfp;
int i;
root = page_root_per_function(dev, function);
if (IS_ERR(root))
return PTR_ERR(root);
new = &root->rb_node;
while (*new) {
parent = *new;
tfp = rb_entry(parent, struct fw_page, rb_node);
if (tfp->addr < addr)
new = &parent->rb_left;
else if (tfp->addr > addr)
new = &parent->rb_right;
else
return -EEXIST;
}
nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
if (!nfp)
return -ENOMEM;
nfp->addr = addr;
nfp->page = page;
nfp->function = function;
nfp->free_count = MLX5_NUM_4K_IN_PAGE;
for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
set_bit(i, &nfp->bitmask);
rb_link_node(&nfp->rb_node, parent, new);
rb_insert_color(&nfp->rb_node, root);
list_add(&nfp->list, &dev->priv.free_list);
return 0;
}
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
u32 function)
{
struct fw_page *result = NULL;
struct rb_root *root;
struct rb_node *tmp;
struct fw_page *tfp;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return NULL;
tmp = root->rb_node;
while (tmp) {
tfp = rb_entry(tmp, struct fw_page, rb_node);
if (tfp->addr < addr) {
tmp = tmp->rb_left;
} else if (tfp->addr > addr) {
tmp = tmp->rb_right;
} else {
result = tfp;
break;
}
}
return result;
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
s32 *npages, int boot)
{
u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
int err;
MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
MLX5_SET(query_pages_in, in, op_mod, boot ?
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));
err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
if (err)
return err;
*npages = MLX5_GET(query_pages_out, out, num_pages);
*func_id = MLX5_GET(query_pages_out, out, function_id);
return err;
}
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
struct fw_page *fp = NULL;
struct fw_page *iter;
unsigned n;
list_for_each_entry(iter, &dev->priv.free_list, list) {
if (iter->function != function)
continue;
fp = iter;
}
if (list_empty(&dev->priv.free_list) || !fp)
return -ENOMEM;
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
return -ENOENT;
}
clear_bit(n, &fp->bitmask);
fp->free_count--;
if (!fp->free_count)
list_del(&fp->list);
*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
return 0;
}
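/*
 * Each system page is split into MLX5_NUM_4K_IN_PAGE firmware pages; the
 * bitmask marks which 4K chunks are free. The backing page is unmapped and
 * returned to the kernel only once all of its chunks have been freed.
 */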
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
bool in_free_list)
{
struct rb_root *root;
root = xa_load(&dev->priv.page_root_xa, fwp->function);
if (WARN_ON_ONCE(!root))
return;
rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
}
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
struct fw_page *fwp;
int n;
fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
if (!fwp) {
mlx5_core_warn_rl(dev, "page not found\n");
return;
}
n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
free_fwp(dev, fwp, fwp->free_count != 1);
else if (fwp->free_count == 1)
list_add(&fwp->list, &dev->priv.free_list);
}
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
u64 addr;
int err;
page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) {
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
map:
addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(device, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
goto err_mapping;
}
/* Firmware doesn't support page with physical address 0 */
if (addr == 0) {
zero_addr = addr;
goto map;
}
err = insert_page(dev, addr, page, function);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
err_mapping:
if (err)
__free_page(page);
if (zero_addr == 0)
dma_unmap_page(device, zero_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
return err;
}
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int err;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_exec_in(dev, manage_pages, in);
if (err)
mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
func_id, err);
}
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
u16 func_type;
u64 addr;
int err;
u32 *in;
int i;
inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
goto out_free;
}
for (i = 0; i < npages; i++) {
retry:
err = alloc_4k(dev, &addr, function);
if (err) {
if (err == -ENOMEM)
err = alloc_system_page(dev, function);
if (err) {
dev->priv.fw_pages_alloc_failed += (npages - i);
goto out_4k;
}
goto retry;
}
MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
}
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
if (err == -EREMOTEIO) {
notify_fail = 0;
/* if triggered by FW and failed by FW ignore */
if (event) {
err = 0;
goto out_dropped;
}
}
err = mlx5_cmd_check(dev, err, in, out);
if (err) {
mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
func_id, npages, err);
goto out_dropped;
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
kvfree(in);
return 0;
out_dropped:
dev->priv.give_pages_dropped += npages;
out_4k:
for (i--; i >= 0; i--)
free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
kvfree(in);
if (notify_fail)
page_notify_fail(dev, func_id, ec_function);
return err;
}
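/*
 * release_all_pages() handles a FW request (RELEASE_ALL_PAGES_MASK in the
 * page request event) by dropping every page tracked for the function
 * locally, without issuing further MANAGE_PAGES commands.
 */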
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
bool ec_function)
{
u32 function = get_function(func_id, ec_function);
struct rb_root *root;
struct rb_node *p;
int npages = 0;
u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
return;
p = rb_first(root);
while (p) {
struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p);
npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
free_fwp(dev, fwp, fwp->free_count);
}
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
}
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
u32 npages)
{
u32 pages_set = 0;
unsigned int n;
for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
pages_set++;
if (!--npages)
break;
}
return pages_set;
}
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
u32 *in, int in_size, u32 *out, int out_size)
{
struct rb_root *root;
struct fw_page *fwp;
struct rb_node *p;
bool ec_function;
u32 func_id;
u32 npages;
u32 i = 0;
if (!mlx5_cmd_is_down(dev))
return mlx5_cmd_do(dev, in, in_size, out, out_size);
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
func_id = MLX5_GET(manage_pages_in, in, function_id);
ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
if (WARN_ON_ONCE(!root))
return -EEXIST;
p = rb_first(root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
p = rb_next(p);
i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
}
MLX5_SET(manage_pages_out, out, output_num_entries, i);
return 0;
}
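/*
 * Ask FW to return up to npages pages. When the command interface is down,
 * reclaim_pages_cmd() above synthesizes the reply from the driver's own
 * tracking so teardown can still make progress.
 */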
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int *nclaimed, bool event, bool ec_function)
{
u32 function = get_function(func_id, ec_function);
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
u16 func_type;
u32 *out;
int err;
int i;
if (nclaimed)
*nclaimed = 0;
outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
MLX5_SET(manage_pages_in, in, function_id, func_id);
MLX5_SET(manage_pages_in, in, input_num_entries, npages);
MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);
mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
func_id, npages, outlen);
err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
if (err) {
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
dev->priv.reclaim_pages_discard += npages;
}
/* if triggered by FW event and failed by FW then ignore */
if (event && err == -EREMOTEIO) {
err = 0;
goto out_free;
}
err = mlx5_cmd_check(dev, err, in, out);
if (err) {
mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
if (num_claimed > npages) {
mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
num_claimed, npages);
err = -EINVAL;
goto out_free;
}
for (i = 0; i < num_claimed; i++)
free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
if (nclaimed)
*nclaimed = num_claimed;
func_type = func_id_to_type(dev, func_id, ec_function);
dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
out_free:
kvfree(out);
return err;
}
static void pages_work_handler(struct work_struct *work)
{
struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
struct mlx5_core_dev *dev = req->dev;
int err = 0;
if (req->release_all)
release_all_pages(dev, req->func_id, req->ec_function);
else if (req->npages < 0)
err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
true, req->ec_function);
else if (req->npages > 0)
err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);
if (err)
mlx5_core_warn(dev, "%s fail %d\n",
req->npages < 0 ? "reclaim" : "give", err);
kfree(req);
}
enum {
EC_FUNCTION_MASK = 0x8000,
RELEASE_ALL_PAGES_MASK = 0x4000,
};
static int req_pages_handler(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_pages_req *req;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
struct mlx5_eqe *eqe;
bool ec_function;
bool release_all;
u16 func_id;
s32 npages;
priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
dev = container_of(priv, struct mlx5_core_dev, priv);
eqe = data;
func_id = be16_to_cpu(eqe->data.req_pages.func_id);
npages = be32_to_cpu(eqe->data.req_pages.num_pages);
ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
RELEASE_ALL_PAGES_MASK;
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
func_id, npages, release_all);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
return NOTIFY_DONE;
}
req->dev = dev;
req->func_id = func_id;
req->npages = npages;
req->ec_function = ec_function;
req->release_all = release_all;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
return NOTIFY_OK;
}
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
u16 func_id;
s32 npages;
int err;
err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
if (err)
return err;
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
enum {
MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};
static int optimal_reclaimed_pages(void)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_layout *lay;
int ret;
ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
MLX5_ST_SZ_BYTES(manage_pages_out)) /
MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
return ret;
}
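/*
 * Reclaim everything tracked under one per-function rb-tree, batching
 * optimal_reclaimed_pages() pages per command and bailing out if FW stops
 * returning pages before the RECLAIM_PAGES timeout elapses.
 */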
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u32 function)
{
u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
u32 ec_function = mlx5_get_ec_function(function);
u32 function_id = mlx5_get_func_id(function);
int nclaimed;
int err;
err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
&nclaimed, false, ec_function);
if (err) {
mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
err, function_id, ec_function);
return err;
}
if (nclaimed)
end = jiffies + recl_pages_to_jiffies;
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
break;
}
}
return 0;
}
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
struct rb_root *root;
unsigned long id;
void *entry;
xa_for_each(&dev->priv.page_root_xa, id, entry) {
root = entry;
mlx5_reclaim_root_pages(dev, root, id);
xa_erase(&dev->priv.page_root_xa, id);
kfree(root);
}
WARN_ON(!xa_empty(&dev->priv.page_root_xa));
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_VF]);
WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_HOST_PF]);
WARN(dev->priv.page_counters[MLX5_EC_VF],
"EC VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_EC_VF]);
return 0;
}
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
INIT_LIST_HEAD(&dev->priv.free_list);
dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
if (!dev->priv.pg_wq)
return -ENOMEM;
xa_init(&dev->priv.page_root_xa);
mlx5_pages_debugfs_init(dev);
return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
mlx5_pages_debugfs_cleanup(dev);
xa_destroy(&dev->priv.page_root_xa);
destroy_workqueue(dev->priv.pg_wq);
}
void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
flush_workqueue(dev->priv.pg_wq);
}
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
unsigned long end = jiffies + recl_vf_pages_to_jiffies;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mlx5_core_warn(dev, "Skipping wait for vf pages stage\n");

return 0;
}
mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
return -ETIMEDOUT;
}
if (*pages < prev_pages) {
end = jiffies + recl_vf_pages_to_jiffies;
prev_pages = *pages;
}
msleep(50);
}
mlx5_core_dbg(dev, "All pages received\n");
return 0;
}
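/* The wait above (like mlx5_reclaim_root_pages()) uses a sliding deadline:
 * the timeout is re-armed whenever the pending-page count makes progress, so
 * the driver only gives up after the RECLAIM_VFS_PAGES timeout elapses with
 * no change, rather than after a fixed total time.
 */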
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
|
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
struct mlx5_fc_cache {
u64 packets;
u64 bytes;
u64 lastuse;
};
struct mlx5_fc {
struct list_head list;
struct llist_node addlist;
struct llist_node dellist;
/* last{packets,bytes} members are used when calculating the delta since
* last reading
*/
u64 lastpackets;
u64 lastbytes;
struct mlx5_fc_bulk *bulk;
u32 id;
bool aging;
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
/* locking scheme:
*
 * It is the responsibility of the user to prevent concurrent or badly
 * ordered calls to mlx5_fc_create(), mlx5_fc_destroy() and any access to a
 * reference to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
*
* access to counter list:
* - create (user context)
 * - mlx5_fc_create() only adds the counter to an addlist to be consumed by
 *   mlx5_fc_stats_work(). addlist is a lockless singly linked list that
 *   doesn't require any additional synchronization when adding a single
 *   node.
 * - work is queued to do the actual insertion
*
* - destroy (user context)
* - add a counter to lockless dellist
* - spawn thread to do the actual del
*
* - dump (user context)
* user should not call dump after destroy
*
* - query (single thread workqueue context)
* destroy/dump - no conflict (see destroy)
* query/dump - packets and bytes might be inconsistent (since update is not
* atomic)
* query/create - no conflict (see create)
 * since every create/destroy spawns the work, the thread will actually
 * query the hardware only after the necessary time has elapsed.
*/
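/* Illustrative usage sketch under the scheme above (caller-side names such as
 * mdev are hypothetical, error handling is omitted, and the caller is assumed
 * to serialize create/destroy/dump, e.g. under RTNL as en_tc.c does):
 *
 *	struct mlx5_fc *fc = mlx5_fc_create(mdev, true);	// queues addlist work
 *	...
 *	u64 bytes, packets, lastuse;
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);	// delta since last read
 *	...
 *	mlx5_fc_destroy(mdev, fc);	// queues dellist work; no dump after this
 */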
static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
u32 id)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
unsigned long next_id = (unsigned long)id + 1;
struct mlx5_fc *counter;
unsigned long tmp;
rcu_read_lock();
/* skip counters that are in idr, but not yet in counters list */
idr_for_each_entry_continue_ul(&fc_stats->counters_idr,
counter, tmp, next_id) {
if (!list_empty(&counter->list))
break;
}
rcu_read_unlock();
return counter ? &counter->list : &fc_stats->counters;
}
static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
struct mlx5_fc *counter)
{
struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
list_add_tail(&counter->list, next);
}
static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
list_del(&counter->list);
spin_lock(&fc_stats->counters_idr_lock);
WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
spin_unlock(&fc_stats->counters_idr_lock);
}
static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
return min_t(int, MLX5_INIT_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
static void update_counter_cache(int index, u32 *bulk_raw_data,
struct mlx5_fc_cache *cache)
{
void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
flow_statistics[index]);
u64 packets = MLX5_GET64(traffic_counter, stats, packets);
u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
if (cache->packets == packets)
return;
cache->packets = packets;
cache->bytes = bytes;
cache->lastuse = jiffies;
}
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
struct mlx5_fc *first,
u32 last_id)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
bool query_more_counters = (first->id <= last_id);
int cur_bulk_len = fc_stats->bulk_query_len;
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
int bulk_len;
int err;
while (query_more_counters) {
/* first id must be aligned to 4 when using bulk query */
bulk_base_id = counter->id & ~0x3;
/* number of counters to query inc. the last counter */
bulk_len = min_t(int, cur_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
data);
if (err) {
mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
return;
}
query_more_counters = false;
list_for_each_entry_from(counter, &fc_stats->counters, list) {
int counter_index = counter->id - bulk_base_id;
struct mlx5_fc_cache *cache = &counter->cache;
if (counter->id >= bulk_base_id + bulk_len) {
query_more_counters = true;
break;
}
update_counter_cache(counter_index, data, cache);
}
}
}
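/* Worked example of the alignment arithmetic above: if the first pending
 * counter has id 0x26 and last_id is 0x31, then bulk_base_id = 0x26 & ~0x3 =
 * 0x24 and the span to cover is 0x31 - 0x24 + 1 = 14 counters, rounded up to
 * ALIGN(14, 4) = 16. The actual bulk_len is the minimum of that and the
 * currently allocated bulk_query_len, and any counters beyond the bulk fall
 * to the next loop iteration.
 */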
static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
if (counter->bulk)
mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
else
mlx5_fc_free(dev, counter);
}
static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
int max_bulk_len = get_max_bulk_query_len(dev);
unsigned long now = jiffies;
u32 *bulk_query_out_tmp;
int max_out_len;
if (fc_stats->bulk_query_alloc_failed &&
time_before(now, fc_stats->next_bulk_query_alloc))
return;
max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
if (!bulk_query_out_tmp) {
mlx5_core_warn_once(dev,
"Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
max_bulk_len);
fc_stats->bulk_query_alloc_failed = true;
fc_stats->next_bulk_query_alloc =
now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
return;
}
kfree(fc_stats->bulk_query_out);
fc_stats->bulk_query_out = bulk_query_out_tmp;
fc_stats->bulk_query_len = max_bulk_len;
if (fc_stats->bulk_query_alloc_failed) {
mlx5_core_info(dev,
"Flow counters bulk query buffer size increased, bulk_size(%d)\n",
max_bulk_len);
fc_stats->bulk_query_alloc_failed = false;
}
}
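/* Note on the retry policy above: a failed attempt to grow the bulk query
 * buffer is only retried after MLX5_FC_BULK_QUERY_ALLOC_PERIOD (180 seconds),
 * so a host under memory pressure is not hammered with large allocations on
 * every sampling interval.
 */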
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
/* Take dellist first to ensure that counters cannot be deleted before
* they are inserted.
*/
struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
unsigned long now = jiffies;
if (addlist || !list_empty(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
llist_for_each_entry(counter, addlist, addlist) {
mlx5_fc_stats_insert(dev, counter);
fc_stats->num_counters++;
}
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
mlx5_fc_release(dev, counter);
fc_stats->num_counters--;
}
if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
fc_stats->num_counters > get_init_bulk_query_len(dev))
mlx5_fc_stats_bulk_query_size_increase(dev);
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
if (counter)
mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
struct mlx5_fc *counter;
int err;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
err = mlx5_cmd_fc_alloc(dev, &counter->id);
if (err) {
kfree(counter);
return ERR_PTR(err);
}
return counter;
}
static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
if (!IS_ERR(counter))
return counter;
}
return mlx5_fc_single_alloc(dev);
}
struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
int err;
if (IS_ERR(counter))
return counter;
INIT_LIST_HEAD(&counter->list);
counter->aging = aging;
if (aging) {
u32 id = counter->id;
counter->cache.lastuse = jiffies;
counter->lastbytes = counter->cache.bytes;
counter->lastpackets = counter->cache.packets;
idr_preload(GFP_KERNEL);
spin_lock(&fc_stats->counters_idr_lock);
err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
GFP_NOWAIT);
spin_unlock(&fc_stats->counters_idr_lock);
idr_preload_end();
if (err)
goto err_out_alloc;
llist_add(&counter->addlist, &fc_stats->addlist);
}
return counter;
err_out_alloc:
mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
if (aging)
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
return counter;
}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
{
return counter->id;
}
EXPORT_SYMBOL(mlx5_fc_id);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
if (!counter)
return;
if (counter->aging) {
llist_add(&counter->dellist, &fc_stats->dellist);
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
return;
}
mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
int init_bulk_len;
int init_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
INIT_LIST_HEAD(&fc_stats->counters);
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
init_bulk_len = get_init_bulk_query_len(dev);
init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
return 0;
err_wq_create:
kfree(fc_stats->bulk_query_out);
return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct llist_node *tmplist;
struct mlx5_fc *counter;
struct mlx5_fc *tmp;
cancel_delayed_work_sync(&dev->priv.fc_stats.work);
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
tmplist = llist_del_all(&fc_stats->addlist);
llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
mlx5_fc_release(dev, counter);
list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
mlx5_fc_release(dev, counter);
mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
idr_destroy(&fc_stats->counters_idr);
kfree(fc_stats->bulk_query_out);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes)
{
return mlx5_cmd_fc_query(dev, counter->id, packets, bytes);
}
EXPORT_SYMBOL(mlx5_fc_query);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
{
return counter->cache.lastuse;
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
struct mlx5_fc_cache c;
c = counter->cache;
*bytes = c.bytes - counter->lastbytes;
*packets = c.packets - counter->lastpackets;
*lastuse = c.lastuse;
counter->lastbytes = c.bytes;
counter->lastpackets = c.packets;
}
void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
struct mlx5_fc_cache c = counter->cache;
*bytes = c.bytes;
*packets = c.packets;
*lastuse = c.lastuse;
}
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
queue_delayed_work(fc_stats->wq, dwork, delay);
}
void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
unsigned long interval)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
}
/* Flow counter bulks */
struct mlx5_fc_bulk {
struct list_head pool_list;
u32 base_id;
int bulk_len;
unsigned long *bitmask;
struct mlx5_fc fcs[];
};
static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
u32 id)
{
counter->bulk = bulk;
counter->id = id;
}
static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
{
return bitmap_weight(bulk->bitmask, bulk->bulk_len);
}
static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
{
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
struct mlx5_fc_bulk *bulk;
int err = -ENOMEM;
int bulk_len;
u32 base_id;
int i;
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
bulk->bitmask = kvcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
GFP_KERNEL);
if (!bulk->bitmask)
goto err_alloc_bitmask;
err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
if (err)
goto err_mlx5_cmd_bulk_alloc;
bulk->base_id = base_id;
bulk->bulk_len = bulk_len;
for (i = 0; i < bulk_len; i++) {
mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
set_bit(i, bulk->bitmask);
}
return bulk;
err_mlx5_cmd_bulk_alloc:
kvfree(bulk->bitmask);
err_alloc_bitmask:
kvfree(bulk);
err_alloc_bulk:
return ERR_PTR(err);
}
static int
mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
{
if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
return -EBUSY;
}
mlx5_cmd_fc_free(dev, bulk->base_id);
kvfree(bulk->bitmask);
kvfree(bulk);
return 0;
}
static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
{
int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
if (free_fc_index >= bulk->bulk_len)
return ERR_PTR(-ENOSPC);
clear_bit(free_fc_index, bulk->bitmask);
return &bulk->fcs[free_fc_index];
}
static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
{
int fc_index = fc->id - bulk->base_id;
if (test_bit(fc_index, bulk->bitmask))
return -EINVAL;
set_bit(fc_index, bulk->bitmask);
return 0;
}
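/* Illustration of the bitmap bookkeeping above: each bulk tracks its free
 * slots with one bit per counter (set = free). Acquiring a counter clears the
 * first set bit and hands out fcs[bit]; releasing it sets the bit again, and
 * releasing an already-free slot is rejected with -EINVAL. Counter ids within
 * a bulk are contiguous, so the slot index is simply fc->id - bulk->base_id.
 */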
/* Flow counters pool API */
static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
{
fc_pool->dev = dev;
mutex_init(&fc_pool->pool_lock);
INIT_LIST_HEAD(&fc_pool->fully_used);
INIT_LIST_HEAD(&fc_pool->partially_used);
INIT_LIST_HEAD(&fc_pool->unused);
fc_pool->available_fcs = 0;
fc_pool->used_fcs = 0;
fc_pool->threshold = 0;
}
static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
{
struct mlx5_core_dev *dev = fc_pool->dev;
struct mlx5_fc_bulk *bulk;
struct mlx5_fc_bulk *tmp;
list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
mlx5_fc_bulk_destroy(dev, bulk);
list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
mlx5_fc_bulk_destroy(dev, bulk);
list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
mlx5_fc_bulk_destroy(dev, bulk);
}
static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
{
fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
}
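/* Reading of the threshold above: the pool keeps roughly one spare counter
 * for every MLX5_FC_POOL_USED_BUFF_RATIO (10) counters in use, capped at
 * MLX5_FC_POOL_MAX_THRESHOLD; a fully free bulk beyond that is destroyed in
 * mlx5_fc_pool_release_counter() instead of being parked on the unused list.
 */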
static struct mlx5_fc_bulk *
mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
{
struct mlx5_core_dev *dev = fc_pool->dev;
struct mlx5_fc_bulk *new_bulk;
new_bulk = mlx5_fc_bulk_create(dev);
if (!IS_ERR(new_bulk))
fc_pool->available_fcs += new_bulk->bulk_len;
mlx5_fc_pool_update_threshold(fc_pool);
return new_bulk;
}
static void
mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
{
struct mlx5_core_dev *dev = fc_pool->dev;
fc_pool->available_fcs -= bulk->bulk_len;
mlx5_fc_bulk_destroy(dev, bulk);
mlx5_fc_pool_update_threshold(fc_pool);
}
static struct mlx5_fc *
mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
struct list_head *next_list,
bool move_non_full_bulk)
{
struct mlx5_fc_bulk *bulk;
struct mlx5_fc *fc;
if (list_empty(src_list))
return ERR_PTR(-ENODATA);
bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
fc = mlx5_fc_bulk_acquire_fc(bulk);
if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
list_move(&bulk->pool_list, next_list);
return fc;
}
static struct mlx5_fc *
mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
{
struct mlx5_fc_bulk *new_bulk;
struct mlx5_fc *fc;
mutex_lock(&fc_pool->pool_lock);
fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
&fc_pool->fully_used, false);
if (IS_ERR(fc))
fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
&fc_pool->partially_used,
true);
if (IS_ERR(fc)) {
new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
if (IS_ERR(new_bulk)) {
fc = ERR_CAST(new_bulk);
goto out;
}
fc = mlx5_fc_bulk_acquire_fc(new_bulk);
list_add(&new_bulk->pool_list, &fc_pool->partially_used);
}
fc_pool->available_fcs--;
fc_pool->used_fcs++;
out:
mutex_unlock(&fc_pool->pool_lock);
return fc;
}
static void
mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
{
struct mlx5_core_dev *dev = fc_pool->dev;
struct mlx5_fc_bulk *bulk = fc->bulk;
int bulk_free_fcs_amount;
mutex_lock(&fc_pool->pool_lock);
if (mlx5_fc_bulk_release_fc(bulk, fc)) {
mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
goto unlock;
}
fc_pool->available_fcs++;
fc_pool->used_fcs--;
bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
if (bulk_free_fcs_amount == 1)
list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
if (bulk_free_fcs_amount == bulk->bulk_len) {
list_del(&bulk->pool_list);
if (fc_pool->available_fcs > fc_pool->threshold)
mlx5_fc_pool_free_bulk(fc_pool, bulk);
else
list_add(&bulk->pool_list, &fc_pool->unused);
}
unlock:
mutex_unlock(&fc_pool->pool_lock);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
|
/*
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
wq->db = wq_ctrl->db.db;
err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
wq->sz = mlx5_wq_cyc_get_size(wq);
wq_ctrl->mdev = mdev;
return 0;
err_db_free:
mlx5_db_free(mdev, &wq_ctrl->db);
return err;
}
void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
{
size_t len;
void *wqe;
if (!net_ratelimit())
return;
nstrides = max_t(u8, nstrides, 1);
len = nstrides << wq->fbc.log_stride;
wqe = mlx5_wq_cyc_get_wqe(wq, ix);
pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
{
wq->wqe_ctr = 0;
wq->cur_sz = 0;
mlx5_wq_cyc_update_db_record(wq);
}
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size);
u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size);
u32 rq_byte_size;
int err;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_frag_buf_alloc_node(mdev,
wq_get_byte_sz(log_rq_sz, log_rq_stride) +
wq_get_byte_sz(log_sq_sz, log_sq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
if (rq_byte_size < PAGE_SIZE) {
/* SQ starts within the same page of the RQ */
u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
mlx5_init_fbc_offset(wq_ctrl->buf.frags,
log_sq_stride, log_sq_sz, sq_strides_offset,
&wq->sq.fbc);
} else {
u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
log_sq_stride, log_sq_sz, &wq->sq.fbc);
}
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
wq_ctrl->mdev = mdev;
return 0;
err_db_free:
mlx5_db_free(mdev, &wq_ctrl->db);
return err;
}
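/* Worked example of the RQ/SQ placement above, assuming a 4 KiB page and the
 * usual 64-byte MLX5_SEND_WQE_BB: an RQ of 2 KiB (rq_byte_size < PAGE_SIZE)
 * shares the first page with the SQ, which then starts at stride offset
 * 2048 / 64 = 32; an RQ of 8 KiB instead occupies rq_npages = 2 whole pages
 * and the SQ fragment control is initialized from the frag after those.
 */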
int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
/* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
wq->db = wq_ctrl->db.db;
err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf,
param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
err);
goto err_db_free;
}
mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
wq_ctrl->mdev = mdev;
return 0;
err_db_free:
mlx5_db_free(mdev, &wq_ctrl->db);
return err;
}
static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
{
struct mlx5_wqe_srq_next_seg *next_seg;
int i;
for (i = 0; i < wq->fbc.sz_m1; i++) {
next_seg = mlx5_wq_ll_get_wqe(wq, i);
next_seg->next_wqe_index = cpu_to_be16(i + 1);
}
next_seg = mlx5_wq_ll_get_wqe(wq, i);
wq->tail_next = &next_seg->next_wqe_index;
}
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
wq->db = wq_ctrl->db.db;
err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
mlx5_wq_ll_init_list(wq);
wq_ctrl->mdev = mdev;
return 0;
err_db_free:
mlx5_db_free(mdev, &wq_ctrl->db);
return err;
}
void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
{
wq->head = 0;
wq->wqe_ctr = 0;
wq->cur_sz = 0;
mlx5_wq_ll_init_list(wq);
mlx5_wq_ll_update_db_record(wq);
}
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/wq.c
|
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/fs_ethtool.h"
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
};
#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4
struct mlx5e_ethtool_steering {
struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
struct list_head rules;
int tot_num_rules;
};
static int flow_type_to_traffic_type(u32 flow_type);
static u32 flow_type_mask(u32 flow_type)
{
return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}
struct mlx5e_ethtool_rule {
struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
struct mlx5_flow_handle *rule;
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_rss *rss;
};
static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
if (!--eth_ft->num_rules) {
mlx5_destroy_flow_table(eth_ft->ft);
eth_ft->ft = NULL;
}
}
#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS 10
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs,
int num_tuples)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_ethtool_table *eth_ft;
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
int max_tuples;
int table_size;
int prio;
switch (flow_type_mask(fs->flow_type)) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &ethtool->l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
eth_ft = &ethtool->l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
return ERR_PTR(-EINVAL);
}
eth_ft->num_rules++;
if (eth_ft->ft)
return eth_ft;
ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_ETHTOOL);
if (!ns)
return ERR_PTR(-EOPNOTSUPP);
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
MLX5E_ETHTOOL_NUM_ENTRIES);
ft_attr.prio = prio;
ft_attr.max_fte = table_size;
ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(ft))
return (void *)ft;
eth_ft->ft = ft;
return eth_ft;
}
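/* Note on the priority math above: the table index is (max_tuples -
 * num_tuples), so rules matching more tuples land in flow tables with a
 * lower priority value, which the steering core walks first; a tightly
 * specified rule therefore takes precedence over a looser one that matches,
 * say, only the destination port.
 */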
static void mask_spec(u8 *mask, u8 *val, size_t size)
{
unsigned int i;
for (i = 0; i < size; i++, mask++, val++)
*val = *mask & *val;
}
#define MLX5E_FTE_SET(header_p, fld, v) \
MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
#define MLX5E_FTE_ADDR_OF(header_p, fld) \
MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
if (ip4src_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_m, sizeof(ip4src_m));
}
if (ip4dst_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_m, sizeof(ip4dst_m));
}
MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}
static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
ip6src_v, ip6_sz);
memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
ip6src_m, ip6_sz);
}
if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
ip6dst_v, ip6_sz);
memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
ip6dst_m, ip6_sz);
}
MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}
static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
}
MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}
static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
}
MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}
static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
l4_mask->ip4dst, l4_val->ip4dst);
set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
l4_mask->ip4dst, l4_val->ip4dst);
set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
l3_mask->ip4dst, l3_val->ip4dst);
if (l3_mask->proto) {
MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
}
}
static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
set_ip6(headers_c, headers_v, l3_mask->ip6src,
l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
if (l3_mask->l4_proto) {
MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
}
}
static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
set_ip6(headers_c, headers_v, l4_mask->ip6src,
l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
set_ip6(headers_c, headers_v, l4_mask->ip6src,
l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethhdr *eth_mask = &fs->m_u.ether_spec;
struct ethhdr *eth_val = &fs->h_u.ether_spec;
mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}
static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}
static void
set_dmac(void *headers_c, void *headers_v,
unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}
static int set_flow_attrs(u32 *match_c, u32 *match_v,
struct ethtool_rx_flow_spec *fs)
{
void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers);
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers);
u32 flow_type = flow_type_mask(fs->flow_type);
switch (flow_type) {
case TCP_V4_FLOW:
parse_tcp4(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V4_FLOW:
parse_udp4(outer_headers_c, outer_headers_v, fs);
break;
case IP_USER_FLOW:
parse_ip4(outer_headers_c, outer_headers_v, fs);
break;
case TCP_V6_FLOW:
parse_tcp6(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V6_FLOW:
parse_udp6(outer_headers_c, outer_headers_v, fs);
break;
case IPV6_USER_FLOW:
parse_ip6(outer_headers_c, outer_headers_v, fs);
break;
case ETHER_FLOW:
parse_ether(outer_headers_c, outer_headers_v, fs);
break;
default:
return -EINVAL;
}
if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) {
mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
fs->h_ext.h_dest);
}
return 0;
}
static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct list_head *head = &ethtool->rules;
struct mlx5e_ethtool_rule *iter;
list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
ethtool->tot_num_rules++;
list_add(&rule->list, head);
}
static bool outer_header_zero(u32 *match_criteria)
{
int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
outer_headers_c + 1,
size - 1);
}
static int flow_get_tirn(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule,
struct ethtool_rx_flow_spec *fs,
u32 rss_context, u32 *tirn)
{
if (fs->flow_type & FLOW_RSS) {
struct mlx5e_packet_merge_param pkt_merge_param;
struct mlx5e_rss *rss;
u32 flow_type;
int err;
int tt;
rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
if (!rss)
return -ENOENT;
flow_type = flow_type_mask(fs->flow_type);
tt = flow_type_to_traffic_type(flow_type);
if (tt < 0)
return -EINVAL;
pkt_merge_param = priv->channels.params.packet_merge;
err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
if (err)
return err;
eth_rule->rss = rss;
mlx5e_rss_refcnt_inc(eth_rule->rss);
} else {
*tirn = mlx5e_rx_res_get_tirn_direct(priv->rx_res, fs->ring_cookie);
}
return 0;
}
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule,
struct mlx5_flow_table *ft,
struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
err = set_flow_attrs(spec->match_criteria, spec->match_value,
fs);
if (err)
goto free;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
err = -ENOMEM;
goto free;
}
err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
if (err)
goto free;
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
__func__, err);
goto free;
}
free:
kvfree(spec);
kfree(dst);
return err ? ERR_PTR(err) : rule;
}
static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
struct mlx5e_ethtool_rule *eth_rule)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
if (eth_rule->rule)
mlx5_del_flow_rules(eth_rule->rule);
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
ethtool->tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *iter;
list_for_each_entry(iter, &ethtool->rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
return NULL;
}
static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
int location)
{
struct mlx5e_ethtool_rule *eth_rule;
eth_rule = find_ethtool_rule(priv, location);
if (eth_rule)
del_ethtool_rule(priv->fs, eth_rule);
eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
if (!eth_rule)
return ERR_PTR(-ENOMEM);
add_rule_to_list(priv, eth_rule);
return eth_rule;
}
#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1)
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
struct ethhdr *eth_mask = &fs->m_u.ether_spec;
int ntuples = 0;
if (!is_zero_ether_addr(eth_mask->h_dest))
ntuples++;
if (!is_zero_ether_addr(eth_mask->h_source))
ntuples++;
if (eth_mask->h_proto)
ntuples++;
return ntuples;
}
static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
int ntuples = 0;
if (l4_mask->tos)
return -EINVAL;
if (l4_mask->ip4src)
ntuples++;
if (l4_mask->ip4dst)
ntuples++;
if (l4_mask->psrc)
ntuples++;
if (l4_mask->pdst)
ntuples++;
/* Flow is TCP/UDP */
return ++ntuples;
}
static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
int ntuples = 0;
if (l3_mask->l4_4_bytes || l3_mask->tos ||
fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
return -EINVAL;
if (l3_mask->ip4src)
ntuples++;
if (l3_mask->ip4dst)
ntuples++;
if (l3_mask->proto)
ntuples++;
/* Flow is IPv4 */
return ++ntuples;
}
static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
int ntuples = 0;
if (l3_mask->l4_4_bytes || l3_mask->tclass)
return -EINVAL;
if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
ntuples++;
if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
ntuples++;
if (l3_mask->l4_proto)
ntuples++;
/* Flow is IPv6 */
return ++ntuples;
}
static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
int ntuples = 0;
if (l4_mask->tclass)
return -EINVAL;
if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
ntuples++;
if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
ntuples++;
if (l4_mask->psrc)
ntuples++;
if (l4_mask->pdst)
ntuples++;
/* Flow is TCP/UDP */
return ++ntuples;
}
static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
return -EINVAL;
if (fs->m_ext.vlan_tci &&
(be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
return -EINVAL;
return 1;
}
static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{
int num_tuples = 0;
int ret = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
if (fs->ring_cookie >= priv->channels.params.num_channels)
return -EINVAL;
switch (flow_type_mask(fs->flow_type)) {
case ETHER_FLOW:
num_tuples += validate_ethter(fs);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
ret = validate_tcpudp4(fs);
if (ret < 0)
return ret;
num_tuples += ret;
break;
case IP_USER_FLOW:
ret = validate_ip4(fs);
if (ret < 0)
return ret;
num_tuples += ret;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
ret = validate_tcpudp6(fs);
if (ret < 0)
return ret;
num_tuples += ret;
break;
case IPV6_USER_FLOW:
ret = validate_ip6(fs);
if (ret < 0)
return ret;
num_tuples += ret;
break;
default:
return -ENOTSUPP;
}
if ((fs->flow_type & FLOW_EXT)) {
ret = validate_vlan(fs);
if (ret < 0)
return ret;
num_tuples += ret;
}
if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest))
num_tuples++;
return num_tuples;
}
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule;
struct mlx5_flow_handle *rule;
int num_tuples;
int err;
num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
return num_tuples;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
if (IS_ERR(eth_ft))
return PTR_ERR(eth_ft);
eth_rule = get_ethtool_rule(priv, fs->location);
if (IS_ERR(eth_rule)) {
put_flow_table(eth_ft);
return PTR_ERR(eth_rule);
}
eth_rule->flow_spec = *fs;
eth_rule->eth_ft = eth_ft;
rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto del_ethtool_rule;
}
eth_rule->rule = rule;
return 0;
del_ethtool_rule:
del_ethtool_rule(priv->fs, eth_rule);
return err;
}
static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
struct mlx5e_ethtool_rule *eth_rule;
int err = 0;
if (location >= MAX_NUM_OF_ETHTOOL_RULES)
return -ENOSPC;
eth_rule = find_ethtool_rule(priv, location);
if (!eth_rule) {
err = -ENOENT;
goto out;
}
del_ethtool_rule(priv->fs, eth_rule);
out:
return err;
}
static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
struct mlx5e_ethtool_rule *eth_rule;
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
list_for_each_entry(eth_rule, &ethtool->rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
continue;
if (!info)
return 0;
info->fs = eth_rule->flow_spec;
if (!eth_rule->rss)
return 0;
index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
if (index < 0)
return index;
info->rss_context = index;
return 0;
}
return -ENOENT;
}
static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
int location = 0;
int idx = 0;
int err = 0;
info->data = MAX_NUM_OF_ETHTOOL_RULES;
while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
err = mlx5e_ethtool_get_flow(priv, NULL, location);
if (!err)
rule_locs[idx++] = location;
location++;
}
return err;
}
int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
{
*ethtool = kvzalloc(sizeof(**ethtool), GFP_KERNEL);
if (!*ethtool)
return -ENOMEM;
return 0;
}
void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool)
{
kvfree(ethtool);
}
void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
del_ethtool_rule(fs, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
INIT_LIST_HEAD(&ethtool->rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
{
switch (flow_type) {
case TCP_V4_FLOW:
return MLX5_TT_IPV4_TCP;
case TCP_V6_FLOW:
return MLX5_TT_IPV6_TCP;
case UDP_V4_FLOW:
return MLX5_TT_IPV4_UDP;
case UDP_V6_FLOW:
return MLX5_TT_IPV6_UDP;
case AH_V4_FLOW:
return MLX5_TT_IPV4_IPSEC_AH;
case AH_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_AH;
case ESP_V4_FLOW:
return MLX5_TT_IPV4_IPSEC_ESP;
case ESP_V6_FLOW:
return MLX5_TT_IPV6_IPSEC_ESP;
case IPV4_FLOW:
return MLX5_TT_IPV4;
case IPV6_FLOW:
return MLX5_TT_IPV6;
default:
return -EINVAL;
}
}
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
u8 rx_hash_field = 0;
u32 flow_type = 0;
u32 rss_idx = 0;
int err;
int tt;
if (nfc->flow_type & FLOW_RSS)
rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
if (tt < 0)
return tt;
/* RSS does not support anything other than hashing to queues
* on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
* port.
*/
if (flow_type != TCP_V4_FLOW &&
flow_type != TCP_V6_FLOW &&
flow_type != UDP_V4_FLOW &&
flow_type != UDP_V6_FLOW)
return -EOPNOTSUPP;
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EOPNOTSUPP;
if (nfc->data & RXH_IP_SRC)
rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
if (nfc->data & RXH_IP_DST)
rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
if (nfc->data & RXH_L4_B_0_1)
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
if (nfc->data & RXH_L4_B_2_3)
rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
mutex_lock(&priv->state_lock);
err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, rss_idx, tt, rx_hash_field);
mutex_unlock(&priv->state_lock);
return err;
}
static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
struct ethtool_rxnfc *nfc)
{
int hash_field = 0;
u32 flow_type = 0;
u32 rss_idx = 0;
int tt;
if (nfc->flow_type & FLOW_RSS)
rss_idx = nfc->rss_context;
flow_type = flow_type_mask(nfc->flow_type);
tt = flow_type_to_traffic_type(flow_type);
if (tt < 0)
return tt;
hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, rss_idx, tt);
if (hash_field < 0)
return hash_field;
nfc->data = 0;
if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
nfc->data |= RXH_IP_SRC;
if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
nfc->data |= RXH_IP_DST;
if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
nfc->data |= RXH_L4_B_0_1;
if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
nfc->data |= RXH_L4_B_2_3;
return 0;
}
int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
break;
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
case ETHTOOL_SRXFH:
err = mlx5e_set_rss_hash_opt(priv, cmd);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = ethtool->tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
case ETHTOOL_GRXFH:
err = mlx5e_get_rss_hash_opt(priv, info);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
|
linux-master
|
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
|
/*
* Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/udp.h>
#include "en.h"
#include "en/port.h"
#include "eswitch.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
struct mlx5_core_health *health = &priv->mdev->priv.health;
return health->fatal_error ? 1 : 0;
}
static int mlx5e_test_link_state(struct mlx5e_priv *priv)
{
u8 port_state;
if (!netif_carrier_ok(priv->netdev))
return 1;
port_state = mlx5_query_vport_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
return port_state == VPORT_STATE_UP ? 0 : 1;
}
static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
{
u32 speed;
if (!netif_carrier_ok(priv->netdev))
return 1;
return mlx5e_port_linkspeed(priv->mdev, &speed);
}
struct mlx5ehdr {
__be32 version;
__be64 magic;
};
#ifdef CONFIG_INET
/* loopback test */
#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
{
struct sk_buff *skb = NULL;
struct mlx5ehdr *mlxh;
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
int iplen;
skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
if (!skb) {
netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n");
return NULL;
}
net_prefetchw(skb->data);
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
ethh = skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb->len);
iph = skb_put(skb, sizeof(struct iphdr));
skb_set_transport_header(skb, skb->len);
udph = skb_put(skb, sizeof(struct udphdr));
/* Fill ETH header */
ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_IP);
/* Fill UDP header */
udph->source = htons(9);
udph->dest = htons(9); /* Discard Protocol */
udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
udph->check = 0;
/* Fill IP header */
iph->ihl = 5;
iph->ttl = 32;
iph->version = 4;
iph->protocol = IPPROTO_UDP;
iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
sizeof(struct mlx5ehdr);
iph->tot_len = htons(iplen);
iph->frag_off = 0;
iph->saddr = 0;
iph->daddr = 0;
iph->tos = 0;
iph->id = 0;
ip_send_check(iph);
/* Fill test header and data */
mlxh = skb_put(skb, sizeof(*mlxh));
mlxh->version = 0;
mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
skb->csum = 0;
skb->ip_summed = CHECKSUM_PARTIAL;
udp4_hwcsum(skb, iph->saddr, iph->daddr);
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
skb->dev = priv->netdev;
return skb;
}
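/* State shared between the loopback test and its packet handler: the
 * registered packet_type hook, a completion signalled once the test
 * packet is seen, and the original local-loopback setting to restore
 * on cleanup.
 */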
struct mlx5e_lbt_priv {
struct packet_type pt;
struct completion comp;
bool loopback_ok;
bool local_lb;
};
static int
mlx5e_test_loopback_validate(struct sk_buff *skb,
struct net_device *ndev,
struct packet_type *pt,
struct net_device *orig_ndev)
{
struct mlx5e_lbt_priv *lbtp = pt->af_packet_priv;
struct mlx5ehdr *mlxh;
struct ethhdr *ethh;
struct udphdr *udph;
struct iphdr *iph;
/* We are only going to peek, no need to clone the SKB */
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
ethh = (struct ethhdr *)skb_mac_header(skb);
if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr))
goto out;
iph = ip_hdr(skb);
if (iph->protocol != IPPROTO_UDP)
goto out;
/* Don't assume skb_transport_header() was set */
udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
if (udph->dest != htons(9))
goto out;
mlxh = (struct mlx5ehdr *)((char *)udph + sizeof(*udph));
if (mlxh->magic != cpu_to_be64(MLX5E_TEST_MAGIC))
goto out; /* so close ! */
/* bingo */
lbtp->loopback_ok = true;
complete(&lbtp->comp);
out:
kfree_skb(skb);
return 0;
}
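/* Prepare the device for the loopback test: temporarily enable NIC
 * local loopback if it is currently off, refresh the TIRs with loopback
 * enabled and register the validation handler for incoming IPv4 packets.
 */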
static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
int err = 0;
/* Temporarily enable local_lb */
err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb);
if (err)
return err;
if (!lbtp->local_lb) {
err = mlx5_nic_vport_update_local_lb(priv->mdev, true);
if (err)
return err;
}
err = mlx5e_refresh_tirs(priv, true, false);
if (err)
goto out;
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
dev_add_pack(&lbtp->pt);
return 0;
out:
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, false);
return err;
}
static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
struct mlx5e_lbt_priv *lbtp)
{
if (!lbtp->local_lb)
mlx5_nic_vport_update_local_lb(priv->mdev, false);
dev_remove_pack(&lbtp->pt);
mlx5e_refresh_tirs(priv, false, false);
}
static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
{
if (is_mdev_switchdev_mode(priv->mdev))
return -EOPNOTSUPP;
return 0;
}
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
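/* Loopback self-test: build a UDP test packet, transmit it on queue 0
 * and wait up to MLX5E_LB_VERIFY_TIMEOUT for the validation handler to
 * see it come back. Returns 0 on success, non-zero on failure.
 */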
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
struct mlx5e_lbt_priv *lbtp;
struct sk_buff *skb = NULL;
int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
netdev_err(priv->netdev,
"\tCan't perform loopback test while device is down\n");
return -ENODEV;
}
lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL);
if (!lbtp)
return -ENOMEM;
lbtp->loopback_ok = false;
err = mlx5e_test_loopback_setup(priv, lbtp);
if (err)
goto out;
skb = mlx5e_test_get_udp_skb(priv);
if (!skb) {
err = -ENOMEM;
goto cleanup;
}
skb_set_queue_mapping(skb, 0);
err = dev_queue_xmit(skb);
if (err) {
netdev_err(priv->netdev,
"\tFailed to xmit loopback packet err(%d)\n",
err);
goto cleanup;
}
wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT);
err = !lbtp->loopback_ok;
cleanup:
mlx5e_test_loopback_cleanup(priv, lbtp);
out:
kfree(lbtp);
return err;
}
#endif
typedef int (*mlx5e_st_func)(struct mlx5e_priv *);
struct mlx5e_st {
char name[ETH_GSTRING_LEN];
mlx5e_st_func st_func;
mlx5e_st_func cond_func;
};
static struct mlx5e_st mlx5e_sts[] = {
{ "Link Test", mlx5e_test_link_state },
{ "Speed Test", mlx5e_test_link_speed },
{ "Health Test", mlx5e_test_health_info },
#ifdef CONFIG_INET
{ "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback },
#endif
};
#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts)
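/* ethtool self-test dispatcher: run every test whose cond_func (if any)
 * reports it as applicable, store each result in buf[] and set
 * ETH_TEST_FL_FAILED if any test returned non-zero.
 */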
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
struct mlx5e_priv *priv = netdev_priv(ndev);
int i, count = 0;
mutex_lock(&priv->state_lock);
netdev_info(ndev, "Self test begin..\n");
for (i = 0; i < MLX5E_ST_NUM; i++) {
struct mlx5e_st st = mlx5e_sts[i];
if (st.cond_func && st.cond_func(priv))
continue;
netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
buf[count] = st.st_func(priv);
netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
count++;
}
mutex_unlock(&priv->state_lock);
for (i = 0; i < count; i++) {
if (buf[i]) {
etest->flags |= ETH_TEST_FL_FAILED;
break;
}
}
netdev_info(ndev, "Self test out: status flags(0x%x)\n",
etest->flags);
}
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
int i, count = 0;
for (i = 0; i < MLX5E_ST_NUM; i++) {
struct mlx5e_st st = mlx5e_sts[i];
if (st.cond_func && st.cond_func(priv))
continue;
if (data)
strcpy(data + count * ETH_GSTRING_LEN, st.name);
count++;
}
return count;
}
int mlx5e_self_test_num(struct mlx5e_priv *priv)
{
return mlx5e_self_test_fill_strings(priv, NULL);
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c |
/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
.flags = 0,
};
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
return xa_load(&esw->offloads.vport_reps, vport_num);
}
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
return;
if (attr->int_port) {
spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
return;
}
spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
/* Only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
 * are not used in the processing that follows either, so clear them all for
 * simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
void *misc2;
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);
if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
}
}
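/* Encode the rule's source port in the match: either as metadata in
 * reg_c_0 (when vport match metadata is enabled) or as an explicit
 * source_port / source_eswitch_owner_vhca_id misc match.
 */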
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr,
struct mlx5_eswitch *src_esw,
u16 vport)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
u32 metadata;
void *misc2;
void *misc;
/* Use metadata matching because a vport is not represented by a single
 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
*/
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
if (!attr->chain && esw_attr && esw_attr->int_port)
metadata =
mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
else
metadata =
mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(src_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_table *ft;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
ft = mlx5_esw_indir_table_get(esw, attr,
mlx5_esw_indir_table_decap_vport(attr), true);
return PTR_ERR_OR_ZERO(ft);
}
static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
if (mlx5_esw_indir_table_decap_vport(attr))
mlx5_esw_indir_table_put(esw,
mlx5_esw_indir_table_decap_vport(attr),
true);
}
static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
struct mlx5e_meter_attr *meter,
int i)
{
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
dest[i].range.min = 0;
dest[i].range.max = meter->params.mtu;
dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);
return 0;
}
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
u32 sampler_id,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
dest[i].sampler_id = sampler_id;
return 0;
}
static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = attr->dest_ft;
if (mlx5_esw_indir_table_decap_vport(attr))
return esw_setup_decap_indir(esw, attr);
return 0;
}
static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_fs_chains *chains, int i)
{
if (mlx5_chains_ignore_flow_level_supported(chains))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, int i)
{
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}
static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level,
int i)
{
struct mlx5_flow_table *ft;
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
ft = mlx5_chains_get_table(chains, chain, prio, level);
if (IS_ERR(ft))
return PTR_ERR(ft);
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = ft;
return 0;
}
static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
int from, int to)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
int i;
for (i = from; i < to; i++)
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5_chains_put_table(chains, 0, 1, 0);
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
false);
}
static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
int i;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
return true;
return false;
}
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_fs_chains *chains,
struct mlx5_flow_attr *attr,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int err;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
/* flow steering cannot handle more than one dest with the same ft
* in a single flow
*/
if (esw_attr->out_count - esw_attr->split_count > 1)
return -EOPNOTSUPP;
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
if (err)
return err;
if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
}
(*i)++;
return 0;
}
static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}
static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
bool result = false;
int i;
/* An indirect table is supported only for flows whose in_port is the uplink
 * and whose destinations are vports on the same eswitch as the uplink;
 * return false if at least one destination doesn't meet these criteria.
*/
for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev)) {
result = true;
} else {
result = false;
break;
}
}
return result;
}
static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int j, err;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
esw_attr->dests[j].rep->vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
goto err_indir_tbl_get;
}
}
if (mlx5_esw_indir_table_decap_vport(attr)) {
err = esw_setup_decap_indir(esw, attr);
if (err)
goto err_indir_tbl_get;
}
return 0;
err_indir_tbl_get:
esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
return err;
}
static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
esw_cleanup_decap_indir(esw, attr);
}
static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
mlx5_chains_put_table(chains, chain, prio, level);
}
static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
{
return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
}
static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *esw_attr,
int attr_idx)
{
if (esw->offloads.ft_ipsec_tx_pol &&
esw_attr->dests[attr_idx].rep &&
esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
/* To be aligned with software, encryption is needed only for tunnel devices */
(esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
return true;
return false;
}
static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *esw_attr)
{
int i;
if (!esw->offloads.ft_ipsec_tx_pol)
return true;
for (i = 0; i < esw_attr->split_count; i++)
if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
return false;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
(esw_attr->out_count - esw_attr->split_count > 1))
return false;
return true;
}
static void
esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
dest[dest_idx].vport.vhca_id =
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
mlx5_lag_is_mpesw(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
if (pkt_reformat) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
}
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
}
}
static void
esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (pkt_reformat &&
esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
}
}
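/* Fill a single vport destination entry, diverting uplink-bound traffic
 * through the IPsec TX policy table when required, otherwise forwarding
 * directly to the vport (with an optional packet reformat).
 */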
static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
int attr_idx, int dest_idx, bool pkt_reformat)
{
if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
attr_idx, dest_idx, pkt_reformat);
else
esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
attr_idx, dest_idx, pkt_reformat);
}
static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
int i)
{
int j;
for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
return i;
}
static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
mlx5_eswitch_vport_match_metadata_enabled(esw) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
static bool
esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
{
bool vf_dest = false, pf_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
if (dests[i].vport.num == MLX5_VPORT_UPLINK)
pf_dest = true;
else
vf_dest = true;
if (vf_dest && pf_dest)
return true;
}
return false;
}
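/* Translate the flow attributes into flow destinations starting at
 * index *i: slow path, sampler, accept, MTU range, indirect table,
 * chain source-port rewrite, or plain vport/table/chain destinations.
 * On return, *i holds the total number of destinations filled in.
 */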
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
struct mlx5_flow_spec *spec,
int *i)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
int err = 0;
if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
goto out;
}
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
err = esw_setup_indir_table(dest, flow_act, esw, attr, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
if (attr->dest_ft) {
err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
1, 0, *i);
(*i)++;
}
}
out:
return err;
}
static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
esw_cleanup_indir_table(esw, attr);
else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
esw_cleanup_chain_src_port_rewrite(esw, attr);
}
}
static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
struct mlx5e_flow_meter_handle *meter;
meter = attr->meter_attr.meter;
flow_act->exe_aso.type = attr->exe_aso_type;
flow_act->exe_aso.object_id = meter->obj_id;
flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
/* use metadata reg 5 for packet color */
flow_act->exe_aso.return_reg_id = 5;
}
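/* Build and install an offloaded FDB rule: set up VLAN push actions,
 * destinations, counter, match levels and modify/ASO actions from the
 * attributes, pick the target FDB (per-vport mirror table for split
 * rules, chain table or attr->ft otherwise) and add the rule, going
 * through a termination table when one is required.
 */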
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return ERR_PTR(-EOPNOTSUPP);
if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
return ERR_PTR(-EOPNOTSUPP);
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
}
}
mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int err;
err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
if (err) {
rule = ERR_PTR(err);
goto err_create_goto_table;
}
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
esw_dests_to_vf_pf_vports(dest, i)) {
esw_warn(esw->dev,
"FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
}
if (esw_attr->decap_pkt_reformat)
flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[i].counter_id = mlx5_fc_id(attr->counter);
i++;
}
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->inner_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
esw_setup_meter(attr, &flow_act);
if (split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
fdb = mlx5_chains_get_table(chains, attr->chain,
attr->prio, 0);
else
fdb = attr->ft;
if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
mlx5_eswitch_set_rule_source_port(esw, spec, attr,
esw_attr->in_mdev->priv.eswitch,
esw_attr->in_rep->vport);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
if (!i) {
kfree(dest);
dest = NULL;
}
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule))
goto err_add_rule;
else
atomic64_inc(&esw->offloads.num_flows);
kfree(dest);
return rule;
err_add_rule:
if (split)
mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
kfree(dest);
return rule;
}
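/* Install the fast-path half of a split (mirror) rule: replicate to the
 * split destinations and then forward to the per-vport table.
 */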
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
/* Source port rewrite (forward to ovs internal port or stack device) isn't
 * supported in a rule with a split action.
*/
err = -EOPNOTSUPP;
else
esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
if (err) {
rule = ERR_PTR(err);
goto err_chain_src_rewrite;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr,
esw_attr->in_mdev->priv.eswitch,
esw_attr->in_rep->vport);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
if (IS_ERR(rule)) {
i = esw_attr->split_count;
goto err_chain_src_rewrite;
}
atomic64_inc(&esw->offloads.num_flows);
kfree(dest);
return rule;
err_chain_src_rewrite:
mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
kfree(dest);
return rule;
}
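/* Common teardown for offloaded and fwd rules: delete the rule, release
 * any termination tables, and put back the per-vport/chain tables and
 * destination resources taken at add time.
 */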
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr,
bool fwd_rule)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = (esw_attr->split_count > 0);
struct mlx5_vport_tbl_attr fwd_attr;
int i;
mlx5_del_flow_rules(rule);
if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
}
}
atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule || split) {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
}
if (fwd_rule) {
mlx5_esw_vporttbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
} else {
if (split)
mlx5_esw_vporttbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
esw_cleanup_dests(esw, attr);
}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, false);
}
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
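/* Add a slow-path FDB rule matching traffic sent on the given SQ from
 * the source eswitch manager vport and steering it to the representor's
 * vport (or to the IPsec TX policy table for the uplink representor
 * when one is installed).
 */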
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch *from_esw,
struct mlx5_eswitch_rep *rep,
u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
u16 vport;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
/* source vport is the esw manager */
vport = from_esw->manager_vport;
if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(from_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = rep->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
} else {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = rep->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
PTR_ERR(flow_rule));
out:
kvfree(spec);
return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
mlx5_del_flow_rules(rule);
}
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
if (rule)
mlx5_del_flow_rules(rule);
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
MLX5_SET(fte_match_param, spec->match_criteria,
misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
vport_num, PTR_ERR(flow_rule));
kvfree(spec);
return flow_rule;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
MLX5_FDB_TO_VPORT_REG_C_1;
}
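/* Enable or disable copying of reg_c_0 (and reg_c_1 when loopback is
 * supported) from the FDB to the vport by read-modify-writing the
 * fdb_to_vport_reg_c_id field of the esw vport context.
 */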
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
u8 curr, wanted;
int err;
if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
MLX5_SET(query_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
if (err)
return err;
curr = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
wanted = MLX5_FDB_TO_VPORT_REG_C_0;
if (mlx5_eswitch_reg_c1_loopback_supported(esw))
wanted |= MLX5_FDB_TO_VPORT_REG_C_1;
if (enable)
curr |= wanted;
else
curr &= ~wanted;
MLX5_SET(modify_esw_vport_context_in, min,
esw_vport_context.fdb_to_vport_reg_c_id, curr);
MLX5_SET(modify_esw_vport_context_in, min,
field_select.fdb_to_vport_reg_c_id, 1);
err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
if (!err) {
if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
else
esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
}
return err;
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev,
struct mlx5_flow_spec *spec,
struct mlx5_flow_destination *dest)
{
void *misc;
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
MLX5_CAP_GEN(peer_dev, vhca_id));
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
}
dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest->vport.num = peer_dev->priv.eswitch->manager_vport;
dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw,
struct mlx5_flow_spec *spec,
u16 vport)
{
void *misc;
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
vport));
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);
}
}
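/* Add one slow-path rule per peer vport (PF, ECPF, VFs and EC VFs) that
 * matches packets sourced from that vport on the peer device and
 * forwards them to the peer eswitch's manager vport.
 */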
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
/* total vports is the same for both e-switches */
int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
struct mlx5_vport *vport;
unsigned long i;
void *misc;
int err;
if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
flows[vport->index] = flow;
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
flows[vport->index] = flow;
}
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
esw_set_peer_miss_rule_source_port(esw,
peer_dev->priv.eswitch,
spec, vport->vport);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
flows[vport->index] = flow;
}
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
if (i >= mlx5_core_max_ec_vfs(peer_dev))
break;
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
flows[vport->index] = flow;
}
}
esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
kvfree(spec);
return 0;
add_ec_vf_flow_err:
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
if (!flows[vport->index])
continue;
mlx5_del_flow_rules(flows[vport->index]);
}
add_vf_flow_err:
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
if (!flows[vport->index])
continue;
mlx5_del_flow_rules(flows[vport->index]);
}
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
mlx5_del_flow_rules(flows[vport->index]);
}
add_ecpf_flow_err:
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
kvfree(flows);
alloc_flows_err:
kvfree(spec);
return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
struct mlx5_vport *vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
/* The flow for a particular vport could be NULL if the other ECPF
* has fewer or no VFs enabled
*/
if (!flows[vport->index])
continue;
mlx5_del_flow_rules(flows[vport->index]);
}
}
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[vport->index]);
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
mlx5_del_flow_rules(flows[vport->index]);
}
if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[vport->index]);
}
kvfree(flows);
esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
void *headers_c;
void *headers_v;
int err = 0;
u8 *dmac_c;
u8 *dmac_v;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
err = -ENOMEM;
goto out;
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
outer_headers.dmac_47_16);
dmac_c[0] = 0x01;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
goto out;
}
esw->fdb_table.offloads.miss_rule_uni = flow_rule;
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
goto out;
}
esw->fdb_table.offloads.miss_rule_multi = flow_rule;
out:
kvfree(spec);
return err;
}
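/* Add a restore-table rule matching the given reg_c_0 tag: copy the tag
 * back via the restore modify-header, set it as the flow tag and
 * forward to the offloads flow table.
 */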
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
struct mlx5_flow_context *flow_context;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *misc;
if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
return ERR_PTR(-EOPNOTSUPP);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
ESW_REG_C0_USER_DATA_METADATA_MASK);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
flow_context = &spec->flow_context;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = tag;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = esw->offloads.ft_offloads;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
kvfree(spec);
if (IS_ERR(flow_rule))
esw_warn(esw->dev,
"Failed to create restore rule for tag: %d, err(%d)\n",
tag, (int)PTR_ERR(flow_rule));
return flow_rule;
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
void
mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int match_params)
{
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in,
match_criteria);
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2 | match_params);
MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS | match_params);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
}
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
struct mlx5_vport_tbl_attr attr;
struct mlx5_vport *vport;
unsigned long i;
attr.chain = 0;
attr.prio = 1;
mlx5_esw_for_each_vport(esw, i, vport) {
attr.vport = vport->vport;
attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
mlx5_esw_vporttbl_put(esw, &attr);
}
}
static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
struct mlx5_vport_tbl_attr attr;
struct mlx5_flow_table *fdb;
struct mlx5_vport *vport;
unsigned long i;
attr.chain = 0;
attr.prio = 1;
mlx5_esw_for_each_vport(esw, i, vport) {
attr.vport = vport->vport;
attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fdb = mlx5_esw_vporttbl_get(esw, &attr);
if (IS_ERR(fdb))
goto out;
}
return 0;
out:
esw_vport_tbl_put(esw);
return PTR_ERR(fdb);
}
#define fdb_modify_header_fwd_to_table_supported(esw) \
(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
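/* Work out which fs_chains features can be enabled: ignore_flow_level,
 * multi-chain/priority offload (needs multi_fdb_encap when encap is on,
 * reg_c1 loopback and fdb_modify_header_fwd_to_table) and tunnel
 * support.
 */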
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
struct mlx5_core_dev *dev = esw->dev;
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
/* Disabled when the TTL workaround is needed, e.g.
 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
*/
esw_warn(dev,
"Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
} else {
*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
esw_info(dev, "Supported tc chains and prios offload\n");
}
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_table *nf_ft, *ft;
struct mlx5_chains_attr attr = {};
struct mlx5_fs_chains *chains;
int err;
esw_init_chains_offload_flags(esw, &attr.flags);
attr.ns = MLX5_FLOW_NAMESPACE_FDB;
attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool;
chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(chains)) {
err = PTR_ERR(chains);
esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
return err;
}
mlx5_chains_print_info(chains);
esw->fdb_table.offloads.esw_chains_priv = chains;
/* Create tc_end_ft, the flow table for the chain that is always created */
nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
1, 0);
if (IS_ERR(nf_ft)) {
err = PTR_ERR(nf_ft);
goto nf_ft_err;
}
/* Always open the root for fast path */
ft = mlx5_chains_get_table(chains, 0, 1, 0);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto level_0_err;
}
/* Open level 1 for split fdb rules now if prios aren't supported */
if (!mlx5_chains_prios_supported(chains)) {
err = esw_vport_tbl_get(esw);
if (err)
goto level_1_err;
}
mlx5_chains_set_end_ft(chains, nf_ft);
return 0;
level_1_err:
mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
mlx5_chains_destroy(chains);
esw->fdb_table.offloads.esw_chains_priv = NULL;
return err;
}
static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
if (!mlx5_chains_prios_supported(chains))
esw_vport_tbl_put(esw);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
mlx5_chains_destroy(chains);
}
#else /* CONFIG_MLX5_CLS_ACT */
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }
static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}
#endif
static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb,
u32 *flow_group_in,
int *ix)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
void *match_criteria;
int count, err = 0;
memset(flow_group_in, 0, inlen);
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in,
source_eswitch_owner_vhca_id_valid, 1);
}
/* See comment at table_size calculation */
count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
*ix += count;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
goto out;
}
esw->fdb_table.offloads.send_to_vport_grp = g;
out:
return err;
}
static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb,
u32 *flow_group_in,
int *ix)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
void *match_criteria;
int err = 0;
if (!esw_src_port_rewrite_supported(esw))
return 0;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
MLX5_SET(create_flow_group_in, flow_group_in,
end_flow_index, *ix + esw->total_vports - 1);
*ix += esw->total_vports;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(esw->dev,
"Failed to create send-to-vport meta flow group err(%d)\n", err);
goto send_vport_meta_err;
}
esw->fdb_table.offloads.send_to_vport_meta_grp = g;
return 0;
send_vport_meta_err:
return err;
}
static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb,
u32 *flow_group_in,
int *ix)
{
int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
void *match_criteria;
int err = 0;
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return 0;
memset(flow_group_in, 0, inlen);
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in,
source_eswitch_owner_vhca_id_valid, 1);
}
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
*ix + max_peer_ports);
*ix += max_peer_ports + 1;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
goto out;
}
esw->fdb_table.offloads.peer_miss_grp = g;
out:
return err;
}
static int
esw_create_miss_group(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fdb,
u32 *flow_group_in,
int *ix)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
void *match_criteria;
int err = 0;
u8 *dmac;
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers.dmac_47_16);
dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
*ix + MLX5_ESW_MISS_FLOWS);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
goto miss_err;
}
esw->fdb_table.offloads.miss_grp = g;
err = esw_add_fdb_miss_rule(esw);
if (err)
goto miss_rule_err;
return 0;
miss_rule_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
return err;
}
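/* Builds the slow-path FDB: the table itself (prio FDB_SLOW_PATH), an
 * empty TC-miss table (prio FDB_TC_MISS), the offloads chains, and then
 * the four flow groups above (send-to-vport, metadata send-to-vport,
 * peer miss and miss), all sharing one flow_group_in buffer and a
 * running flow index 'ix'.
 */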
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
int table_size, ix = 0, err = 0;
u32 flags = 0, *flow_group_in;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
err = -EOPNOTSUPP;
goto ns_err;
}
esw->fdb_table.offloads.ns = root_ns;
err = mlx5_flow_namespace_set_mode(root_ns,
esw->dev->priv.steering->mode);
if (err) {
esw_warn(dev, "Failed to set FDB namespace steering mode\n");
goto ns_err;
}
/* To be strictly correct:
 * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
 * should be:
 * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
 * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
 * but since the peer device might not be in switchdev mode that is not
 * possible. We rely on the fact that, by default, FW sets max vfs and
 * max sfs to the same value on both devices. If this needs to change in
 * the future, note that the peer miss group should also be created
 * based on the number of total vports of the peer (currently it also
 * uses esw->total_vports).
 */
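/* As an illustration only (the real constants may differ): with
 * MLX5_MAX_PORTS = 2, esw->total_vports = 10, MAX_SQ_NVPORTS = 32 and
 * MAX_PF_SQ = 2, the formula below gives
 * 2 * (10 * 32 + 2) + 10 * 2 + MLX5_ESW_MISS_FLOWS = 664 + MLX5_ESW_MISS_FLOWS.
 */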
table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
/* Create the slow path FDB with encap set, so that further table
 * instances can be created at run time while VFs are probed, if the FW
 * allows that.
 */
if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
ft_attr.flags = flags;
ft_attr.max_fte = table_size;
ft_attr.prio = FDB_SLOW_PATH;
fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
goto slow_fdb_err;
}
esw->fdb_table.offloads.slow_fdb = fdb;
/* Create an empty TC-miss managed table. This allows plugging in the
 * following priorities without directly exposing their level 0 table to
 * eswitch_offloads and passing it as miss_fdb to the following call to
 * esw_chains_create().
 */
memset(&ft_attr, 0, sizeof(ft_attr));
ft_attr.prio = FDB_TC_MISS;
esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
goto tc_miss_table_err;
}
err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
if (err) {
esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
}
err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
if (err)
goto send_vport_err;
err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
if (err)
goto send_vport_meta_err;
err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
if (err)
goto peer_miss_err;
err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
if (err)
goto miss_err;
kvfree(flow_group_in);
return 0;
miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
kvfree(flow_group_in);
return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
if (!mlx5_eswitch_get_slow_fdb(esw))
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
if (esw->fdb_table.offloads.send_to_vport_meta_grp)
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
atomic64_set(&esw->user_count, 0);
}
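/* Number of steering source ports in ft_offloads: every vport plus the
 * MLX5_ESW_MISS_FLOWS slots, plus MLX5E_TC_MAX_INT_PORT_NUM when TC
 * internal ports are supported. This count sizes both the offloads
 * table and the vport RX group below.
 */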
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
int nvports;
nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
if (mlx5e_tc_int_port_supported(esw))
nvports += MLX5E_TC_MAX_INT_PORT_NUM;
return nvports;
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_namespace *ns;
int err = 0;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
return -EOPNOTSUPP;
}
ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
MLX5_ESW_FT_OFFLOADS_DROP_RULE;
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft_offloads)) {
err = PTR_ERR(ft_offloads);
esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
return err;
}
esw->offloads.ft_offloads = ft_offloads;
return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
u32 *flow_group_in;
int nvports;
int err = 0;
nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
goto out;
}
esw->offloads.vport_rx_group = g;
out:
kvfree(flow_group_in);
return err;
}
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
/* The ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
 * for the drop rule, which is placed at the end of the table.
 * So return the total number of vport and int_port entries as the drop
 * rule index.
 */
return esw_get_nr_ft_offloads_steering_src_ports(esw);
}
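/* The drop group covers exactly one entry, the last index of
 * ft_offloads, and is used by the unconditional drop rule created in
 * esw_create_vport_rx_drop_rule().
 */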
static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
u32 *flow_group_in;
int flow_index;
int err = 0;
flow_index = esw_create_vport_rx_drop_rule_index(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
goto out;
}
esw->offloads.vport_rx_drop_group = g;
out:
kvfree(flow_group_in);
return err;
}
static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
if (esw->offloads.vport_rx_drop_group)
mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}
void
mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
u16 vport,
struct mlx5_flow_spec *spec)
{
void *misc;
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_for_match(esw, vport));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
}
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
flow_rule = ERR_PTR(-ENOMEM);
goto out;
}
mlx5_esw_set_spec_source_port(esw, vport, spec);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
}
out:
kvfree(spec);
return flow_rule;
}
static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *flow_rule;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
&flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
"fs offloads: Failed to add vport rx drop rule err %ld\n",
PTR_ERR(flow_rule));
return PTR_ERR(flow_rule);
}
esw->offloads.vport_rx_drop_rule = flow_rule;
return 0;
}
static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
if (esw->offloads.vport_rx_drop_rule)
mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_vport *vport;
unsigned long i;
if (!MLX5_CAP_GEN(dev, vport_group_manager))
return -EOPNOTSUPP;
if (!mlx5_esw_is_fdb_created(esw))
return -EOPNOTSUPP;
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
mlx5_mode = MLX5_INLINE_MODE_NONE;
goto out;
case MLX5_CAP_INLINE_MODE_L2:
mlx5_mode = MLX5_INLINE_MODE_L2;
goto out;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
goto query_vports;
}
query_vports:
mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
if (prev_mlx5_mode != mlx5_mode)
return -EINVAL;
prev_mlx5_mode = mlx5_mode;
}
out:
*mode = mlx5_mode;
return 0;
}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
return;
mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
mlx5_destroy_flow_group(offloads->restore_group);
mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
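/* When reg_c1 loopback is supported, the restore table has
 * 1 << ESW_REG_C0_USER_DATA_METADATA_BITS entries, a single group
 * matching the user-data bits of metadata reg_c_0, and a modify header
 * that copies reg_c_1 into reg_b.
 */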
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *ns;
struct mlx5_modify_hdr *mod_hdr;
void *match_criteria, *misc;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *g;
u32 *flow_group_in;
int err = 0;
if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
return 0;
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
return -EOPNOTSUPP;
}
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in) {
err = -ENOMEM;
goto out_free;
}
ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
ft = mlx5_create_flow_table(ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
esw_warn(esw->dev, "Failed to create restore table, err %d\n",
err);
goto out_free;
}
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
ESW_REG_C0_USER_DATA_METADATA_MASK);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
ft_attr.max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS_2);
g = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create restore flow group, err: %d\n",
err);
goto err_group;
}
MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
MLX5_SET(copy_action_in, modact, src_field,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
MLX5_SET(copy_action_in, modact, dst_field,
MLX5_ACTION_IN_FIELD_METADATA_REG_B);
mod_hdr = mlx5_modify_header_alloc(esw->dev,
MLX5_FLOW_NAMESPACE_KERNEL, 1,
modact);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
esw_warn(dev, "Failed to create restore mod header, err: %d\n",
err);
goto err_mod_hdr;
}
esw->offloads.ft_offloads_restore = ft;
esw->offloads.restore_group = g;
esw->offloads.restore_copy_hdr_id = mod_hdr;
kvfree(flow_group_in);
return 0;
err_mod_hdr:
mlx5_destroy_flow_group(g);
err_group:
mlx5_destroy_flow_table(ft);
out_free:
kvfree(flow_group_in);
return err;
}
static int esw_offloads_start(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err;
esw->mode = MLX5_ESWITCH_OFFLOADS;
err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed setting eswitch to offloads");
esw->mode = MLX5_ESWITCH_LEGACY;
mlx5_rescan_drivers(esw->dev);
return err;
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
&esw->offloads.inline_mode)) {
esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
NL_SET_ERR_MSG_MOD(extack,
"Inline mode is different between vports");
}
}
return 0;
}
static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
if (!rep)
return -ENOMEM;
rep->vport = vport->vport;
rep->vport_index = vport->index;
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
if (err)
goto insert_err;
return 0;
insert_err:
kfree(rep);
return err;
}
static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
xa_erase(&esw->offloads.vport_reps, rep->vport);
kfree(rep);
}
static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
mlx5_esw_for_each_rep(esw, i, rep)
mlx5_esw_offloads_rep_cleanup(esw, rep);
xa_destroy(&esw->offloads.vport_reps);
}
static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
int err;
xa_init(&esw->offloads.vport_reps);
mlx5_esw_for_each_vport(esw, i, vport) {
err = mlx5_esw_offloads_rep_init(esw, vport);
if (err)
goto err;
}
return 0;
err:
esw_offloads_cleanup_reps(esw);
return err;
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err = 0;
down_write(&esw->mode_lock);
if (mlx5_esw_is_fdb_created(esw)) {
err = -EBUSY;
goto done;
}
if (!mlx5_esw_vport_match_metadata_supported(esw)) {
err = -EOPNOTSUPP;
goto done;
}
if (ctx->val.vbool)
esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
else
esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
up_write(&esw->mode_lock);
return err;
}
static int esw_port_metadata_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
return 0;
}
static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
u8 esw_mode;
esw_mode = mlx5_eswitch_mode(dev);
if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
"E-Switch must be either disabled or in non-switchdev mode");
return -EBUSY;
}
return 0;
}
static const struct devlink_param esw_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
"esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
esw_port_metadata_get,
esw_port_metadata_set,
esw_port_metadata_validate),
};
int esw_offloads_init(struct mlx5_eswitch *esw)
{
int err;
err = esw_offloads_init_reps(esw);
if (err)
return err;
err = devl_params_register(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
if (err)
goto err_params;
return 0;
err_params:
esw_offloads_cleanup_reps(esw);
return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
devl_params_unregister(priv_to_devlink(esw->dev),
esw_devlink_params,
ARRAY_SIZE(esw_devlink_params));
esw_offloads_cleanup_reps(esw);
}
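/* Representor state machine: rep_data[type].state goes
 * REP_UNREGISTERED -> REP_REGISTERED when the rep ops are registered,
 * REP_REGISTERED -> REP_LOADED when the rep is loaded, and back on
 * unload/unregister. The atomic_cmpxchg() below guarantees each
 * load/unload callback runs at most once per transition.
 */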
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_LOADED, REP_REGISTERED) == REP_LOADED)
esw->offloads.rep_ops[rep_type]->unload(rep);
}
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
mlx5_esw_for_each_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
if (err)
goto err_reps;
}
return 0;
err_reps:
atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
for (--rep_type; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
return err;
}
static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
return mlx5_esw_offloads_pf_vf_devlink_port_init(esw, vport);
}
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
mlx5_esw_offloads_pf_vf_devlink_port_cleanup(esw, vport);
}
int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
struct mlx5_devlink_port *dl_port,
u32 controller, u32 sfnum)
{
return mlx5_esw_offloads_sf_devlink_port_init(esw, vport, dl_port, controller, sfnum);
}
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
mlx5_esw_offloads_sf_devlink_port_cleanup(esw, vport);
}
int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
err = mlx5_esw_offloads_devlink_port_register(esw, vport);
if (err)
return err;
err = mlx5_esw_offloads_rep_load(esw, vport->vport);
if (err)
goto load_err;
return err;
load_err:
mlx5_esw_offloads_devlink_port_unregister(esw, vport);
return err;
}
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
mlx5_esw_offloads_rep_unload(esw, vport->vport);
mlx5_esw_offloads_devlink_port_unregister(esw, vport);
}
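/* Points the slave device's FDB root at the master's root flow table
 * (SET_FLOW_TABLE_ROOT with the master's vhca_id), or back at the
 * slave's own root table when 'master' is NULL.
 */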
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
struct mlx5_flow_root_namespace *root;
struct mlx5_flow_namespace *ns;
int err;
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type,
FS_FT_FDB);
if (master) {
ns = mlx5_get_flow_namespace(master,
MLX5_FLOW_NAMESPACE_FDB);
root = find_root(&ns->node);
mutex_lock(&root->chain_lock);
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(set_flow_table_root_in, in,
table_eswitch_owner_vhca_id,
MLX5_CAP_GEN(master, vhca_id));
MLX5_SET(set_flow_table_root_in, in, table_id,
root->root_ft->id);
} else {
ns = mlx5_get_flow_namespace(slave,
MLX5_FLOW_NAMESPACE_FDB);
root = find_root(&ns->node);
mutex_lock(&root->chain_lock);
MLX5_SET(set_flow_table_root_in, in, table_id,
root->root_ft->id);
}
err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
mutex_unlock(&root->chain_lock);
return err;
}
static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave,
struct mlx5_vport *vport,
struct mlx5_flow_table *acl)
{
u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
int err = 0;
void *misc;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = slave->priv.eswitch->manager_vport;
dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
&dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
} else {
err = xa_insert(&vport->egress.offloads.bounce_rules,
slave_index, flow_rule, GFP_KERNEL);
if (err)
mlx5_del_flow_rules(flow_rule);
}
kvfree(spec);
return err;
}
static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
struct mlx5_flow_namespace *egress_ns,
struct mlx5_vport *vport, size_t count)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {
.max_fte = count, .prio = 0, .level = 0,
};
struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
int err;
if (vport->egress.acl)
return 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
if (vport->vport || mlx5_core_is_ecpf(esw->dev))
ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
goto out;
}
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
match_criteria);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_MISC_PARAMETERS);
MLX5_SET(create_flow_group_in, flow_group_in,
source_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR(g)) {
err = PTR_ERR(g);
goto err_group;
}
vport->egress.acl = acl;
vport->egress.offloads.bounce_grp = g;
vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);
kvfree(flow_group_in);
return 0;
err_group:
mlx5_destroy_flow_table(acl);
out:
kvfree(flow_group_in);
return err;
}
static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
{
if (!xa_empty(&vport->egress.offloads.bounce_rules))
return;
mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
vport->egress.offloads.bounce_grp = NULL;
mlx5_destroy_flow_table(vport->egress.acl);
vport->egress.acl = NULL;
}
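/* Shared-FDB "bounce" rules: the master's manager-vport egress ACL gets
 * one rule per slave, matching source_port == MLX5_VPORT_UPLINK and
 * source_eswitch_owner_vhca_id == the slave's vhca_id, and forwarding
 * the packet to the slave's manager vport. Rules are stored in an
 * xarray keyed by the slave vhca_id.
 */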
static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_core_dev *slave, size_t count)
{
struct mlx5_eswitch *esw = master->priv.eswitch;
u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
struct mlx5_flow_namespace *egress_ns;
struct mlx5_vport *vport;
int err;
vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
if (IS_ERR(vport))
return PTR_ERR(vport);
egress_ns = mlx5_get_flow_vport_acl_namespace(master,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
vport->index);
if (!egress_ns)
return -EINVAL;
if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
return 0;
err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
if (err)
return err;
if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
return -EINVAL;
err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
if (err)
goto err_rule;
return 0;
err_rule:
esw_master_egress_destroy_resources(vport);
return err;
}
static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
struct mlx5_core_dev *slave_dev)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
dev->priv.eswitch->manager_vport);
esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));
if (xa_empty(&vport->egress.offloads.bounce_rules)) {
esw_acl_egress_ofld_cleanup(vport);
xa_destroy(&vport->egress.offloads.bounce_rules);
}
}
int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves)
{
int err;
err = esw_set_slave_root_fdb(master_esw->dev,
slave_esw->dev);
if (err)
return err;
err = esw_set_master_egress_rule(master_esw->dev,
slave_esw->dev, max_slaves);
if (err)
goto err_acl;
return err;
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
return err;
}
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw)
{
esw_set_slave_root_fdb(NULL, slave_esw->dev);
esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
const struct mlx5_eswitch_rep_ops *ops;
struct mlx5_eswitch_rep *rep;
unsigned long i;
u8 rep_type;
mlx5_esw_for_each_rep(esw, i, rep) {
rep_type = NUM_REP_TYPES;
while (rep_type--) {
ops = esw->offloads.rep_ops[rep_type];
if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
ops->event)
ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
}
}
}
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
}
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw)
{
const struct mlx5_eswitch_rep_ops *ops;
struct mlx5_eswitch_rep *rep;
unsigned long i;
u8 rep_type;
int err;
err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
if (err)
return err;
mlx5_esw_for_each_rep(esw, i, rep) {
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
ops = esw->offloads.rep_ops[rep_type];
if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
ops->event) {
err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
if (err)
goto err_out;
}
}
}
return 0;
err_out:
mlx5_esw_offloads_unpair(esw, peer_esw);
return err;
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw,
bool pair)
{
u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
struct mlx5_flow_root_namespace *peer_ns;
struct mlx5_flow_root_namespace *ns;
int err;
peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
ns = esw->dev->priv.steering->fdb_root_ns;
if (pair) {
err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
if (err)
return err;
err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
if (err) {
mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
return err;
}
} else {
mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
}
return 0;
}
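/* Devcom pairing is symmetric: set the two FDB namespaces as peers,
 * install peer miss rules and send PAIR events to the reps in both
 * directions, then record each peer in the 'paired' xarray keyed by its
 * vhca_id. UNPAIR undoes these steps in reverse order.
 */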
static int mlx5_esw_offloads_devcom_event(int event,
void *my_data,
void *event_data)
{
struct mlx5_eswitch *esw = my_data;
struct mlx5_eswitch *peer_esw = event_data;
u16 esw_i, peer_esw_i;
bool esw_paired;
int err;
peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
esw_paired = !!xa_load(&esw->paired, peer_esw_i);
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
if (esw_paired)
break;
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
if (err)
goto err_out;
err = mlx5_esw_offloads_pair(esw, peer_esw);
if (err)
goto err_peer;
err = mlx5_esw_offloads_pair(peer_esw, esw);
if (err)
goto err_pair;
err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
if (err)
goto err_xa;
err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
if (err)
goto err_peer_xa;
esw->num_peers++;
peer_esw->num_peers++;
mlx5_devcom_comp_set_ready(esw->devcom, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
if (!esw_paired)
break;
peer_esw->num_peers--;
esw->num_peers--;
if (!esw->num_peers && !peer_esw->num_peers)
mlx5_devcom_comp_set_ready(esw->devcom, false);
xa_erase(&peer_esw->paired, esw_i);
xa_erase(&esw->paired, peer_esw_i);
mlx5_esw_offloads_unpair(peer_esw, esw);
mlx5_esw_offloads_unpair(esw, peer_esw);
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
break;
}
return 0;
err_peer_xa:
xa_erase(&esw->paired, peer_esw_i);
err_xa:
mlx5_esw_offloads_unpair(peer_esw, esw);
err_pair:
mlx5_esw_offloads_unpair(esw, peer_esw);
err_peer:
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
event, err);
return err;
}
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key)
{
int i;
for (i = 0; i < MLX5_MAX_PORTS; i++)
INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
mutex_init(&esw->offloads.peer_mutex);
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
if ((MLX5_VPORT_MANAGER(esw->dev) || mlx5_core_is_ecpf_esw_manager(esw->dev)) &&
!mlx5_lag_is_supported(esw->dev))
return;
xa_init(&esw->paired);
esw->num_peers = 0;
esw->devcom = mlx5_devcom_register_component(esw->dev->priv.devc,
MLX5_DEVCOM_ESW_OFFLOADS,
key,
mlx5_esw_offloads_devcom_event,
esw);
if (IS_ERR_OR_NULL(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_PAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR,
esw);
}
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
if (IS_ERR_OR_NULL(esw->devcom))
return;
mlx5_devcom_send_event(esw->devcom,
ESW_OFFLOADS_DEVCOM_UNPAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR,
esw);
mlx5_devcom_unregister_component(esw->devcom);
xa_destroy(&esw->paired);
esw->devcom = NULL;
}
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw)
{
return mlx5_devcom_comp_is_ready(esw->devcom);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
return false;
if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
MLX5_FDB_TO_VPORT_REG_C_0))
return false;
return true;
}
#define MLX5_ESW_METADATA_RSVD_UPLINK 1
/* Share the same metadata for both uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 * same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 * cannot hit the eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
return MLX5_ESW_METADATA_RSVD_UPLINK;
}
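/* The allocated metadata is (pf_num << ESW_VPORT_BITS) | id. Assuming
 * the 4-bit PFNUM / 12-bit id split described below, pf_num 1 and id 2
 * (illustrative values) yield metadata 0x1002; id 1 is reserved for the
 * uplink.
 */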
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
/* Reserve 0xf for internal port offload */
u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
u32 pf_num;
int id;
/* Only 4 bits of pf_num */
pf_num = mlx5_get_dev_index(esw->dev);
if (pf_num > max_pf_num)
return 0;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
/* Use only ids 2-4095 for all PFs: 0 is invalid and 1 is reserved for the uplink */
id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
MLX5_ESW_METADATA_RSVD_UPLINK + 1,
vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
return id;
}
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
/* Metadata contains only 12 bits of actual ida id */
ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (vport->vport == MLX5_VPORT_UPLINK)
vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
else
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
}
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
if (!vport->default_metadata)
return;
if (vport->vport == MLX5_VPORT_UPLINK)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return;
mlx5_esw_for_each_vport(esw, i, vport)
esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
int err;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
mlx5_esw_for_each_vport(esw, i, vport) {
err = esw_offloads_vport_metadata_setup(esw, vport);
if (err)
goto metadata_err;
}
return 0;
metadata_err:
esw_offloads_metadata_uninit(esw);
return err;
}
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
return err;
err = esw_acl_egress_ofld_setup(esw, vport);
if (err)
goto egress_err;
return 0;
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
return err;
}
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
esw_acl_egress_ofld_cleanup(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
}
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *uplink, *manager;
int ret;
uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (IS_ERR(uplink))
return PTR_ERR(uplink);
ret = esw_vport_create_offloads_acl_tables(esw, uplink);
if (ret)
return ret;
manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
if (IS_ERR(manager)) {
ret = PTR_ERR(manager);
goto err_manager;
}
ret = esw_vport_create_offloads_acl_tables(esw, manager);
if (ret)
goto err_manager;
return 0;
err_manager:
esw_vport_destroy_offloads_acl_tables(esw, uplink);
return ret;
}
static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
if (!IS_ERR(vport))
esw_vport_destroy_offloads_acl_tables(esw, vport);
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
if (!IS_ERR(vport))
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
int ret;
if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
mlx5_esw_offloads_rep_load(esw, rep->vport);
}
return 0;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_indir_table *indir;
int err;
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
atomic64_set(&esw->user_count, 0);
indir = mlx5_esw_indir_table_init();
if (IS_ERR(indir)) {
err = PTR_ERR(indir);
goto create_indir_err;
}
esw->fdb_table.offloads.indir = indir;
err = esw_create_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
err = esw_create_offloads_table(esw);
if (err)
goto create_offloads_err;
err = esw_create_restore_table(esw);
if (err)
goto create_restore_err;
err = esw_create_offloads_fdb_tables(esw);
if (err)
goto create_fdb_err;
err = esw_create_vport_rx_group(esw);
if (err)
goto create_fg_err;
err = esw_create_vport_rx_drop_group(esw);
if (err)
goto create_rx_drop_fg_err;
err = esw_create_vport_rx_drop_rule(esw);
if (err)
goto create_rx_drop_rule_err;
return 0;
create_rx_drop_rule_err:
esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
esw_destroy_vport_rx_group(esw);
create_fg_err:
esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
esw_destroy_restore_table(esw);
create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
esw_destroy_offloads_acl_tables(esw);
create_acl_err:
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
esw_destroy_vport_rx_drop_rule(esw);
esw_destroy_vport_rx_drop_group(esw);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_acl_tables(esw);
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
struct devlink *devlink;
bool host_pf_disabled;
u16 new_num_vfs;
new_num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_pf_disabled);
if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
return;
devlink = priv_to_devlink(esw->dev);
devl_lock(devlink);
/* Number of VFs can only change from "0 to x" or "x to 0". */
if (esw->esw_funcs.num_vfs > 0) {
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
} else {
int err;
err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
MLX5_VPORT_UC_ADDR_CHANGE);
if (err) {
devl_unlock(devlink);
return;
}
}
esw->esw_funcs.num_vfs = new_num_vfs;
devl_unlock(devlink);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
struct mlx5_host_work *host_work;
struct mlx5_eswitch *esw;
const u32 *out;
host_work = container_of(work, struct mlx5_host_work, work);
esw = host_work->esw;
out = mlx5_esw_query_functions(esw->dev);
if (IS_ERR(out))
goto out;
esw_vfs_changed_event_handler(esw, out);
kvfree(out);
out:
kfree(host_work);
}
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
struct mlx5_esw_functions *esw_funcs;
struct mlx5_host_work *host_work;
struct mlx5_eswitch *esw;
host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
if (!host_work)
return NOTIFY_DONE;
esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
host_work->esw = esw;
INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
queue_work(esw->work_queue, &host_work->work);
return NOTIFY_OK;
}
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
const u32 *query_host_out;
if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
return 0;
query_host_out = mlx5_esw_query_functions(esw->dev);
if (IS_ERR(query_host_out))
return PTR_ERR(query_host_out);
/* Mark a non-local controller with a non-zero controller number. */
esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
host_params_context.host_number);
kvfree(query_host_out);
return 0;
}
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
/* Local controller is always valid */
if (controller == 0)
return true;
if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
return false;
/* External host numbering starts from zero in the device */
return (controller == esw->offloads.host_number + 1);
}
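/* Enabling offloads is ordered: RoCE on, query the host number,
 * allocate per-vport match metadata, enable passing vport metadata to
 * the NIC, create the reg_c0 object mapping pool, initialize steering,
 * load the uplink representor first, and only then enable the PF/VF
 * vports. The error path unwinds in the reverse order.
 */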
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
unsigned long i;
u64 mapping_id;
int err;
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
err = mlx5_esw_host_number_init(esw);
if (err)
goto err_metadata;
err = esw_offloads_metadata_init(esw);
if (err)
goto err_metadata;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
ESW_REG_C0_USER_DATA_METADATA_MASK,
true);
if (IS_ERR(reg_c0_obj_pool)) {
err = PTR_ERR(reg_c0_obj_pool);
goto err_pool;
}
esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
err = esw_offloads_steering_init(esw);
if (err)
goto err_steering_init;
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
if (mlx5_core_ec_sriov_enabled(esw->dev))
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
err = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
if (err)
goto err_uplink;
err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
if (err)
goto err_vports;
return 0;
err_vports:
mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
err_uplink:
esw_offloads_steering_cleanup(esw);
err_steering_init:
mapping_destroy(reg_c0_obj_pool);
err_pool:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_metadata_uninit(esw);
err_metadata:
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
struct netlink_ext_ack *extack)
{
int err;
esw->mode = MLX5_ESWITCH_LEGACY;
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
*/
if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
return 0;
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
if (err)
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_eswitch_disable_pf_vf_vports(esw);
mlx5_esw_offloads_rep_unload(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
mapping_destroy(esw->offloads.reg_c0_obj_pool);
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
*mlx5_mode = MLX5_ESWITCH_LEGACY;
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
break;
default:
return -EINVAL;
}
return 0;
}
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
switch (mlx5_mode) {
case MLX5_ESWITCH_LEGACY:
*mode = DEVLINK_ESWITCH_MODE_LEGACY;
break;
case MLX5_ESWITCH_OFFLOADS:
*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
break;
default:
return -EINVAL;
}
return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_INLINE_MODE_NONE:
*mlx5_mode = MLX5_INLINE_MODE_NONE;
break;
case DEVLINK_ESWITCH_INLINE_MODE_LINK:
*mlx5_mode = MLX5_INLINE_MODE_L2;
break;
case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
*mlx5_mode = MLX5_INLINE_MODE_IP;
break;
case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
break;
default:
return -EINVAL;
}
return 0;
}
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
switch (mlx5_mode) {
case MLX5_INLINE_MODE_NONE:
*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
break;
case MLX5_INLINE_MODE_L2:
*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
break;
case MLX5_INLINE_MODE_IP:
*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
break;
case MLX5_INLINE_MODE_TCP_UDP:
*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
break;
default:
return -EINVAL;
}
return 0;
}
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
struct net *devl_net, *netdev_net;
struct mlx5_eswitch *esw;
esw = mlx5_devlink_eswitch_nocheck_get(devlink);
netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
devl_net = devlink_net(devlink);
return net_eq(devl_net, netdev_net);
}
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
int err;
if (!mlx5_esw_allowed(esw))
return 0;
/* Take TC into account */
err = mlx5_esw_try_lock(esw);
if (err < 0)
return err;
esw->offloads.num_block_mode++;
mlx5_esw_unlock(esw);
return 0;
}
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
esw->offloads.num_block_mode--;
up_write(&esw->mode_lock);
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
u16 cur_mlx5_mode, mlx5_mode = 0;
struct mlx5_eswitch *esw;
int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
!esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
return -EPERM;
}
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
goto enable_lag;
}
cur_mlx5_mode = err;
err = 0;
if (cur_mlx5_mode == mlx5_mode)
goto unlock;
if (esw->offloads.num_block_mode) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change eswitch mode when IPsec SA and/or policies are configured");
err = -EOPNOTSUPP;
goto unlock;
}
mlx5_eswitch_disable_locked(esw);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
if (mlx5_devlink_trap_get_num_active(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change mode while devlink traps are active");
err = -EOPNOTSUPP;
goto unlock;
}
err = esw_offloads_start(esw, extack);
} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
err = esw_offloads_stop(esw, extack);
mlx5_rescan_drivers(esw->dev);
} else {
err = -EINVAL;
}
unlock:
mlx5_esw_unlock(esw);
enable_lag:
mlx5_lag_enable_change(esw->dev);
return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
up_read(&esw->mode_lock);
return err;
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_vport *vport;
u16 err_vport_num = 0;
unsigned long i;
int err = 0;
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
if (err) {
err_vport_num = vport->vport;
NL_SET_ERR_MSG_MOD(extack,
"Failed to set min inline on vport");
goto revert_inline_mode;
}
}
if (mlx5_core_ec_sriov_enabled(esw->dev)) {
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
if (err) {
err_vport_num = vport->vport;
NL_SET_ERR_MSG_MOD(extack,
"Failed to set min inline on vport");
goto revert_ec_vf_inline_mode;
}
}
}
return 0;
revert_ec_vf_inline_mode:
mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
if (vport->vport == err_vport_num)
break;
mlx5_modify_nic_vport_min_inline(dev,
vport->vport,
esw->offloads.inline_mode);
}
revert_inline_mode:
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
if (vport->vport == err_vport_num)
break;
mlx5_modify_nic_vport_min_inline(dev,
vport->vport,
esw->offloads.inline_mode);
}
return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
u8 mlx5_mode;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_write(&esw->mode_lock);
switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
err = 0;
goto out;
}
fallthrough;
case MLX5_CAP_INLINE_MODE_L2:
NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
err = -EOPNOTSUPP;
goto out;
case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
break;
}
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
err = -EOPNOTSUPP;
goto out;
}
err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
if (err)
goto out;
err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
if (err)
goto out;
esw->offloads.inline_mode = mlx5_mode;
up_write(&esw->mode_lock);
return 0;
out:
up_write(&esw->mode_lock);
return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
struct mlx5_eswitch *esw;
int err;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
up_read(&esw->mode_lock);
return err;
}
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
if (!mlx5_esw_allowed(esw))
return true;
down_write(&esw->mode_lock);
if (esw->mode != MLX5_ESWITCH_LEGACY &&
esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
up_write(&esw->mode_lock);
return false;
}
esw->offloads.num_block_encap++;
up_write(&esw->mode_lock);
return true;
}
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
if (!mlx5_esw_allowed(esw))
return;
down_write(&esw->mode_lock);
esw->offloads.num_block_encap--;
up_write(&esw->mode_lock);
}
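/* In switchdev mode, changing the encap mode means destroying and
 * re-creating the slow-path FDB tables, since the encap/decap flags are
 * set at table creation time. The change is rejected while offloaded
 * flows exist or while IPsec holds a block reference (num_block_encap).
 */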
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
enum devlink_eswitch_encap_mode encap,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw;
int err = 0;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_write(&esw->mode_lock);
if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
err = -EOPNOTSUPP;
goto unlock;
}
if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
err = -EOPNOTSUPP;
goto unlock;
}
if (esw->mode == MLX5_ESWITCH_LEGACY) {
esw->offloads.encap = encap;
goto unlock;
}
if (esw->offloads.encap == encap)
goto unlock;
if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
err = -EOPNOTSUPP;
goto unlock;
}
if (esw->offloads.num_block_encap) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when IPsec SA and/or policies are configured");
err = -EOPNOTSUPP;
goto unlock;
}
esw_destroy_offloads_fdb_tables(esw);
esw->offloads.encap = encap;
err = esw_create_offloads_fdb_tables(esw);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed re-creating fast FDB table");
esw->offloads.encap = !encap;
(void)esw_create_offloads_fdb_tables(esw);
}
unlock:
up_write(&esw->mode_lock);
return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap)
{
struct mlx5_eswitch *esw;
esw = mlx5_devlink_eswitch_get(devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
up_read(&esw->mode_lock);
return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
/* Currently, only an ECPF-based device has a representor for the host PF. */
if (vport_num == MLX5_VPORT_PF &&
!mlx5_core_is_ecpf_esw_manager(esw->dev))
return false;
if (vport_num == MLX5_VPORT_ECPF &&
!mlx5_ecpf_vport_exists(esw->dev))
return false;
return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
const struct mlx5_eswitch_rep_ops *ops,
u8 rep_type)
{
struct mlx5_eswitch_rep_data *rep_data;
struct mlx5_eswitch_rep *rep;
unsigned long i;
esw->offloads.rep_ops[rep_type] = ops;
mlx5_esw_for_each_rep(esw, i, rep) {
if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
rep->esw = esw;
rep_data = &rep->rep_data[rep_type];
atomic_set(&rep_data->state, REP_REGISTERED);
}
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
if (esw->mode == MLX5_ESWITCH_OFFLOADS)
__unload_reps_all_vport(esw, rep_type);
mlx5_esw_for_each_rep(esw, i, rep)
atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
return rep->rep_data[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
u16 vport,
u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
rep = mlx5_eswitch_get_rep(esw, vport);
if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
esw->offloads.rep_ops[rep_type]->get_proto_dev)
return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
u16 vport)
{
return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
u16 vport_num)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
if (WARN_ON_ONCE(IS_ERR(vport)))
return 0;
return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
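/* vhca_map translates a function's vhca_id (read from its HCA caps via
 * the other-function query) back to its vport number, so that
 * mlx5_eswitch_vhca_id_to_vport() can resolve it.
 */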
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
void *query_ctx;
void *hca_caps;
int err;
*vhca_id = 0;
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
return -ENOMEM;
err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
if (err)
goto out_free;
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
out_free:
kfree(query_ctx);
return err;
}
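/* Cache the vhca_id -> vport_num mapping in the offloads vhca_map xarray. */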
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *old_entry, *vhca_map_entry, vhca_id;
int err;
err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
if (err) {
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
vport_num, err);
return err;
}
vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
if (!vhca_map_entry)
return -ENOMEM;
*vhca_map_entry = vport_num;
old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
if (xa_is_err(old_entry)) {
kfree(vhca_map_entry);
return xa_err(old_entry);
}
kfree(old_entry);
return 0;
}
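/* Remove the vhca_id -> vport_num mapping added by mlx5_esw_vport_vhca_id_set(). */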
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
u16 *vhca_map_entry, vhca_id;
int err;
err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
if (err)
esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
vport_num, err);
vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
kfree(vhca_map_entry);
}
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
if (!res)
return -ENOENT;
*vport_num = *res;
return 0;
}
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
u16 vport_num)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
if (WARN_ON_ONCE(IS_ERR(vport)))
return 0;
return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
ether_addr_copy(hw_addr, vport->info.mac);
*hw_addr_len = ETH_ALEN;
mutex_unlock(&esw->state_lock);
return 0;
}
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
return mlx5_eswitch_set_vport_mac(esw, vport->vport, hw_addr);
}
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
*is_enabled = vport->info.mig_enabled;
mutex_unlock(&esw->state_lock);
return 0;
}
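/* Toggle the 'migratable' HCA capability of the function behind the vport
 * via query/set of its general device caps 2.
 */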
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
void *query_ctx;
void *hca_caps;
int err;
if (!MLX5_CAP_GEN(esw->dev, migration)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
if (vport->info.mig_enabled == enable) {
err = 0;
goto out;
}
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx) {
err = -ENOMEM;
goto out;
}
err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
MLX5_CAP_GENERAL_2);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
goto out_free;
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
goto out_free;
}
vport->info.mig_enabled = enable;
out_free:
kfree(query_ctx);
out:
mutex_unlock(&esw->state_lock);
return err;
}
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
*is_enabled = vport->info.roce_enabled;
mutex_unlock(&esw->state_lock);
return 0;
}
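/* Toggle the 'roce' HCA capability of the function behind the vport via
 * query/set of its general device caps.
 */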
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
u16 vport_num = vport->vport;
void *query_ctx;
void *hca_caps;
int err;
if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support VHCA management");
return -EOPNOTSUPP;
}
mutex_lock(&esw->state_lock);
if (vport->info.roce_enabled == enable) {
err = 0;
goto out;
}
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx) {
err = -ENOMEM;
goto out;
}
err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
MLX5_CAP_GENERAL);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
goto out_free;
}
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
goto out_free;
}
vport->info.roce_enabled = enable;
out_free:
kfree(query_ctx);
out:
mutex_unlock(&esw->state_lock);
return err;
}
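/* For rules that were diverted to the uplink for IPsec processing, switch
 * the destination back to the plain vport forward.
 */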
int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
{
struct mlx5_flow_destination new_dest = {};
struct mlx5_flow_destination old_dest = {};
if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
return 0;
esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
}
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int err = 0;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPSec crypto");
return -EOPNOTSUPP;
}
vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
err = -EOPNOTSUPP;
goto unlock;
}
*is_enabled = vport->info.ipsec_crypto_enabled;
unlock:
mutex_unlock(&esw->state_lock);
return err;
}
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
u16 vport_num;
int err;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
err = mlx5_esw_ipsec_vf_crypto_offload_supported(esw->dev, vport_num);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Device doesn't support IPsec crypto");
return err;
}
vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
goto unlock;
}
if (vport->info.ipsec_crypto_enabled == enable)
goto unlock;
if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
err = -EBUSY;
goto unlock;
}
err = mlx5_esw_ipsec_vf_crypto_offload_set(esw, vport, enable);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set IPsec crypto");
goto unlock;
}
vport->info.ipsec_crypto_enabled = enable;
if (enable)
esw->enabled_ipsec_vf_count++;
else
esw->enabled_ipsec_vf_count--;
unlock:
mutex_unlock(&esw->state_lock);
return err;
}
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
int err = 0;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
return -EOPNOTSUPP;
}
vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
err = -EOPNOTSUPP;
goto unlock;
}
*is_enabled = vport->info.ipsec_packet_enabled;
unlock:
mutex_unlock(&esw->state_lock);
return err;
}
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
bool enable,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
u16 vport_num;
int err;
esw = mlx5_devlink_eswitch_get(port->devlink);
if (IS_ERR(esw))
return PTR_ERR(esw);
vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Device doesn't support IPsec packet mode");
return err;
}
vport = mlx5_devlink_port_vport_get(port);
mutex_lock(&esw->state_lock);
if (!vport->enabled) {
err = -EOPNOTSUPP;
NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
goto unlock;
}
if (vport->info.ipsec_packet_enabled == enable)
goto unlock;
if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
err = -EBUSY;
goto unlock;
}
err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to set IPsec packet mode");
goto unlock;
}
vport->info.ipsec_packet_enabled = enable;
if (enable)
esw->enabled_ipsec_vf_count++;
else
esw->enabled_ipsec_vf_count--;
unlock:
mutex_unlock(&esw->state_lock);
return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved
#include <linux/hwmon.h>
#include <linux/bitmap.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/port.h>
#include "mlx5_core.h"
#include "hwmon.h"
#define CHANNELS_TYPE_NUM 2 /* chip channel and temp channel */
#define CHIP_CONFIG_NUM 1
/* module 0 is mapped to sensor_index 64 in MTMP register */
#define to_mtmp_module_sensor_idx(idx) (64 + (idx))
/* All temperatures are retrieved in units of 0.125 C. The hwmon framework
 * expects millidegrees C, hence values are multiplied by 125.
 */
#define mtmp_temp_to_mdeg(temp) ((temp) * 125)
struct temp_channel_desc {
u32 sensor_index;
char sensor_name[32];
};
/* chip_channel_config and channel_info arrays must be 0-terminated, hence + 1 */
struct mlx5_hwmon {
struct mlx5_core_dev *mdev;
struct device *hwmon_dev;
struct hwmon_channel_info chip_info;
u32 chip_channel_config[CHIP_CONFIG_NUM + 1];
struct hwmon_channel_info temp_info;
u32 *temp_channel_config;
const struct hwmon_channel_info *channel_info[CHANNELS_TYPE_NUM + 1];
struct hwmon_chip_info chip;
struct temp_channel_desc *temp_channel_desc;
u32 asic_platform_scount;
u32 module_scount;
};
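/* Read the MTMP register for @sensor_index into @mtmp_out. */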
static int mlx5_hwmon_query_mtmp(struct mlx5_core_dev *mdev, u32 sensor_index, u32 *mtmp_out)
{
u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index);
return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
mtmp_out, MLX5_ST_SZ_BYTES(mtmp_reg),
MLX5_REG_MTMP, 0, 0);
}
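/* Clear the recorded max temperature for @sensor_index (MTMP mtr bit). */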
static int mlx5_hwmon_reset_max_temp(struct mlx5_core_dev *mdev, int sensor_index)
{
u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
MLX5_SET(mtmp_reg, mtmp_in, sensor_index, sensor_index);
MLX5_SET(mtmp_reg, mtmp_in, mtr, 1);
return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
mtmp_out, sizeof(mtmp_out),
MLX5_REG_MTMP, 0, 0);
}
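/* Enable max temperature tracking for @sensor_index (MTMP mte bit). */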
static int mlx5_hwmon_enable_max_temp(struct mlx5_core_dev *mdev, int sensor_index)
{
u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
u32 mtmp_in[MLX5_ST_SZ_DW(mtmp_reg)] = {};
int err;
err = mlx5_hwmon_query_mtmp(mdev, sensor_index, mtmp_in);
if (err)
return err;
MLX5_SET(mtmp_reg, mtmp_in, mte, 1);
return mlx5_core_access_reg(mdev, mtmp_in, sizeof(mtmp_in),
mtmp_out, sizeof(mtmp_out),
MLX5_REG_MTMP, 0, 1);
}
static int mlx5_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
int err;
if (type != hwmon_temp)
return -EOPNOTSUPP;
err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[channel].sensor_index,
mtmp_out);
if (err)
return err;
switch (attr) {
case hwmon_temp_input:
*val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temperature));
return 0;
case hwmon_temp_highest:
*val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, max_temperature));
return 0;
case hwmon_temp_crit:
*val = mtmp_temp_to_mdeg(MLX5_GET(mtmp_reg, mtmp_out, temp_threshold_hi));
return 0;
default:
return -EOPNOTSUPP;
}
}
static int mlx5_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
if (type != hwmon_temp || attr != hwmon_temp_reset_history)
return -EOPNOTSUPP;
return mlx5_hwmon_reset_max_temp(hwmon->mdev,
hwmon->temp_channel_desc[channel].sensor_index);
}
static umode_t mlx5_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
int channel)
{
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
case hwmon_temp_highest:
case hwmon_temp_crit:
case hwmon_temp_label:
return 0444;
case hwmon_temp_reset_history:
return 0200;
default:
return 0;
}
}
static int mlx5_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, const char **str)
{
struct mlx5_hwmon *hwmon = dev_get_drvdata(dev);
if (type != hwmon_temp || attr != hwmon_temp_label)
return -EOPNOTSUPP;
*str = (const char *)hwmon->temp_channel_desc[channel].sensor_name;
return 0;
}
static const struct hwmon_ops mlx5_hwmon_ops = {
.read = mlx5_hwmon_read,
.read_string = mlx5_hwmon_read_string,
.is_visible = mlx5_hwmon_is_visible,
.write = mlx5_hwmon_write,
};
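/* Fill in each channel's sensor_name from MTMP; fall back to "sensor<N>"
 * when the register reports an empty name.
 */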
static int mlx5_hwmon_init_channels_names(struct mlx5_hwmon *hwmon)
{
u32 i;
for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) {
u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)] = {};
char *sensor_name;
int err;
err = mlx5_hwmon_query_mtmp(hwmon->mdev, hwmon->temp_channel_desc[i].sensor_index,
mtmp_out);
if (err)
return err;
sensor_name = MLX5_ADDR_OF(mtmp_reg, mtmp_out, sensor_name_hi);
if (!*sensor_name) {
snprintf(hwmon->temp_channel_desc[i].sensor_name,
sizeof(hwmon->temp_channel_desc[i].sensor_name), "sensor%u",
hwmon->temp_channel_desc[i].sensor_index);
continue;
}
memcpy(&hwmon->temp_channel_desc[i].sensor_name, sensor_name,
MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_hi) +
MLX5_FLD_SZ_BYTES(mtmp_reg, sensor_name_lo));
}
return 0;
}
static int mlx5_hwmon_get_module_sensor_index(struct mlx5_core_dev *mdev, u32 *module_index)
{
int module_num;
int err;
err = mlx5_query_module_num(mdev, &module_num);
if (err)
return err;
*module_index = to_mtmp_module_sensor_idx(module_num);
return 0;
}
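/* Assign a sensor index to each temperature channel from the MTCAP
 * sensor_map bitmap, then append the module sensor index if present.
 */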
static int mlx5_hwmon_init_sensors_indexes(struct mlx5_hwmon *hwmon, u64 sensor_map)
{
DECLARE_BITMAP(smap, BITS_PER_TYPE(sensor_map));
unsigned long bit_pos;
int err = 0;
int i = 0;
bitmap_from_u64(smap, sensor_map);
for_each_set_bit(bit_pos, smap, BITS_PER_TYPE(sensor_map)) {
hwmon->temp_channel_desc[i].sensor_index = bit_pos;
i++;
}
if (hwmon->module_scount)
err = mlx5_hwmon_get_module_sensor_index(hwmon->mdev,
&hwmon->temp_channel_desc[i].sensor_index);
return err;
}
static void mlx5_hwmon_channel_info_init(struct mlx5_hwmon *hwmon)
{
int i;
hwmon->channel_info[0] = &hwmon->chip_info;
hwmon->channel_info[1] = &hwmon->temp_info;
hwmon->chip_channel_config[0] = HWMON_C_REGISTER_TZ;
hwmon->chip_info.config = (const u32 *)hwmon->chip_channel_config;
hwmon->chip_info.type = hwmon_chip;
for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++)
hwmon->temp_channel_config[i] = HWMON_T_INPUT | HWMON_T_HIGHEST | HWMON_T_CRIT |
HWMON_T_RESET_HISTORY | HWMON_T_LABEL;
hwmon->temp_info.config = (const u32 *)hwmon->temp_channel_config;
hwmon->temp_info.type = hwmon_temp;
}
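/* The module sensor is considered monitorable when its MTMP temperature
 * field reads non-zero.
 */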
static int mlx5_hwmon_is_module_mon_cap(struct mlx5_core_dev *mdev, bool *mon_cap)
{
u32 mtmp_out[MLX5_ST_SZ_DW(mtmp_reg)];
u32 module_index;
int err;
err = mlx5_hwmon_get_module_sensor_index(mdev, &module_index);
if (err)
return err;
err = mlx5_hwmon_query_mtmp(mdev, module_index, mtmp_out);
if (err)
return err;
if (MLX5_GET(mtmp_reg, mtmp_out, temperature))
*mon_cap = true;
return 0;
}
static int mlx5_hwmon_get_sensors_count(struct mlx5_core_dev *mdev, u32 *asic_platform_scount)
{
u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {};
u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {};
int err;
err = mlx5_core_access_reg(mdev, mtcap_in, sizeof(mtcap_in),
mtcap_out, sizeof(mtcap_out),
MLX5_REG_MTCAP, 0, 0);
if (err)
return err;
*asic_platform_scount = MLX5_GET(mtcap_reg, mtcap_out, sensor_count);
return 0;
}
static void mlx5_hwmon_free(struct mlx5_hwmon *hwmon)
{
if (!hwmon)
return;
kfree(hwmon->temp_channel_config);
kfree(hwmon->temp_channel_desc);
kfree(hwmon);
}
static struct mlx5_hwmon *mlx5_hwmon_alloc(struct mlx5_core_dev *mdev)
{
struct mlx5_hwmon *hwmon;
bool mon_cap = false;
u32 sensors_count;
int err;
hwmon = kzalloc(sizeof(*mdev->hwmon), GFP_KERNEL);
if (!hwmon)
return ERR_PTR(-ENOMEM);
err = mlx5_hwmon_get_sensors_count(mdev, &hwmon->asic_platform_scount);
if (err)
goto err_free_hwmon;
/* If the module sensor has thermal monitoring capability, allocate a
 * channel descriptor for it.
 */
err = mlx5_hwmon_is_module_mon_cap(mdev, &mon_cap);
if (err)
goto err_free_hwmon;
hwmon->module_scount = mon_cap ? 1 : 0;
sensors_count = hwmon->asic_platform_scount + hwmon->module_scount;
hwmon->temp_channel_desc = kcalloc(sensors_count, sizeof(*hwmon->temp_channel_desc),
GFP_KERNEL);
if (!hwmon->temp_channel_desc) {
err = -ENOMEM;
goto err_free_hwmon;
}
/* Sensor configuration values array; must be 0-terminated, hence + 1. */
hwmon->temp_channel_config = kcalloc(sensors_count + 1, sizeof(*hwmon->temp_channel_config),
GFP_KERNEL);
if (!hwmon->temp_channel_config) {
err = -ENOMEM;
goto err_free_temp_channel_desc;
}
hwmon->mdev = mdev;
return hwmon;
err_free_temp_channel_desc:
kfree(hwmon->temp_channel_desc);
err_free_hwmon:
kfree(hwmon);
return ERR_PTR(err);
}
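/* Query MTCAP, set up channel info, sensor indexes and names, and enable
 * max temperature tracking on every sensor.
 */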
static int mlx5_hwmon_dev_init(struct mlx5_hwmon *hwmon)
{
u32 mtcap_out[MLX5_ST_SZ_DW(mtcap_reg)] = {};
u32 mtcap_in[MLX5_ST_SZ_DW(mtcap_reg)] = {};
int err;
int i;
err = mlx5_core_access_reg(hwmon->mdev, mtcap_in, sizeof(mtcap_in),
mtcap_out, sizeof(mtcap_out),
MLX5_REG_MTCAP, 0, 0);
if (err)
return err;
mlx5_hwmon_channel_info_init(hwmon);
mlx5_hwmon_init_sensors_indexes(hwmon, MLX5_GET64(mtcap_reg, mtcap_out, sensor_map));
err = mlx5_hwmon_init_channels_names(hwmon);
if (err)
return err;
for (i = 0; i < hwmon->asic_platform_scount + hwmon->module_scount; i++) {
err = mlx5_hwmon_enable_max_temp(hwmon->mdev,
hwmon->temp_channel_desc[i].sensor_index);
if (err)
return err;
}
hwmon->chip.ops = &mlx5_hwmon_ops;
hwmon->chip.info = (const struct hwmon_channel_info **)hwmon->channel_info;
return 0;
}
int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev)
{
struct device *dev = mdev->device;
struct mlx5_hwmon *hwmon;
int err;
if (!MLX5_CAP_MCAM_REG(mdev, mtmp))
return 0;
hwmon = mlx5_hwmon_alloc(mdev);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
err = mlx5_hwmon_dev_init(hwmon);
if (err)
goto err_free_hwmon;
hwmon->hwmon_dev = hwmon_device_register_with_info(dev, "mlx5",
hwmon,
&hwmon->chip,
NULL);
if (IS_ERR(hwmon->hwmon_dev)) {
err = PTR_ERR(hwmon->hwmon_dev);
goto err_free_hwmon;
}
mdev->hwmon = hwmon;
return 0;
err_free_hwmon:
mlx5_hwmon_free(hwmon);
return err;
}
void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev)
{
struct mlx5_hwmon *hwmon = mdev->hwmon;
if (!hwmon)
return;
hwmon_device_unregister(hwmon->hwmon_dev);
mlx5_hwmon_free(hwmon);
mdev->hwmon = NULL;
}
| linux-master | drivers/net/ethernet/mellanox/mlx5/core/hwmon.c |