python_code | repo_name | file_path |
---|---|---|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Goramo PCI200SYN synchronous serial card driver for Linux
*
* Copyright (C) 2002-2008 Krzysztof Halasa <[email protected]>
*
* For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
* PLX Technology Inc. PCI9052 Data Book
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hd64572.h"
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
/* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
typedef struct {
u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
u32 loc_rom_range; /* 10h : Local ROM Range */
u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
u32 loc_rom_base; /* 24h : Local ROM Base */
u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
} plx9052;
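/* Registers in this block are accessed with readl()/writel() (or the
* 16-bit variants) on the mapped structure. Illustrative example, taken
* from pci200_pci_init_one() below -- enabling interrupts on the PCI
* bridge is a read-modify-write of intr_ctrl_stat:
* p = &card->plxbase->intr_ctrl_stat;
* writew(readw(p) | 0x0040, p);
*/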
typedef struct port_s {
struct napi_struct napi;
struct net_device *netdev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
int rxpart; /* partial frame received, next frame invalid*/
unsigned short encoding;
unsigned short parity;
u16 rxin; /* rx ring buffer 'in' pointer */
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
} port_t;
typedef struct card_s {
u8 __iomem *rambase; /* buffer memory base (virtual) */
u8 __iomem *scabase; /* SCA memory base (virtual) */
plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
u16 rx_ring_buffers; /* number of buffers in a ring */
u16 tx_ring_buffers;
u16 buff_offset; /* offset of first buffer of first channel */
u8 irq; /* interrupt request level */
port_t ports[2];
} card_t;
#define get_port(card, port) (&(card)->ports[port])
#define sca_flush(card) (sca_in(IER0, card))
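/* Helper for copying frames into card RAM: the copy is split into chunks
* of at most 256 bytes and a byte is read back after each chunk,
* apparently to flush posted writes through the PLX bridge before the
* next burst (rationale inferred, not stated in the original driver).
*/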
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
int len;
do {
len = length > 256 ? 256 : length;
memcpy_toio(dest, src, len);
dest += len;
src += len;
length -= len;
readb(dest);
} while (len);
}
#undef memcpy_toio
#define memcpy_toio new_memcpy_toio
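/* From this point on, every memcpy_toio() call in the included hd64572.c
* (for example in sca_xmit()) resolves to the chunked new_memcpy_toio()
* defined above.
*/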
#include "hd64572.c"
static void pci200_set_iface(port_t *port)
{
card_t *card = port->card;
u16 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
case CLOCK_TXINT:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
break;
case CLOCK_TXFROMRX:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
default: /* EXTernal clock */
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
break;
}
port->rxs = rxs;
port->txs = txs;
sca_out(rxs, msci + RXS, card);
sca_out(txs, msci + TXS, card);
sca_set_port(port);
}
static int pci200_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int result = hdlc_open(dev);
if (result)
return result;
sca_open(dev);
pci200_set_iface(port);
sca_flush(port->card);
return 0;
}
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
sca_flush(dev_to_port(dev)->card);
hdlc_close(dev);
return 0;
}
static int pci200_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
return -EOPNOTSUPP;
}
static int pci200_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
port_t *port = dev_to_port(dev);
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_V35;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
case IF_IFACE_V35:
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
if (new_line.clock_type != CLOCK_EXT &&
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
memcpy(&port->settings, &new_line, size); /* Update settings */
pci200_set_iface(port);
sca_flush(port->card);
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
int i;
card_t *card = pci_get_drvdata(pdev);
for (i = 0; i < 2; i++)
if (card->ports[i].card)
unregister_hdlc_device(card->ports[i].netdev);
if (card->irq)
free_irq(card->irq, card);
if (card->rambase)
iounmap(card->rambase);
if (card->scabase)
iounmap(card->scabase);
if (card->plxbase)
iounmap(card->plxbase);
pci_release_regions(pdev);
pci_disable_device(pdev);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
free_netdev(card->ports[1].netdev);
kfree(card);
}
static const struct net_device_ops pci200_ops = {
.ndo_open = pci200_open,
.ndo_stop = pci200_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = pci200_ioctl,
.ndo_siocdevprivate = pci200_siocdevprivate,
};
static int pci200_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
card_t *card;
u32 __iomem *p;
int i;
u32 ramsize;
u32 ramphys; /* buffer memory base */
u32 scaphys; /* SCA memory base */
u32 plxphys; /* PLX registers memory base */
i = pci_enable_device(pdev);
if (i)
return i;
i = pci_request_regions(pdev, "PCI200SYN");
if (i) {
pci_disable_device(pdev);
return i;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
pci_set_drvdata(pdev, card);
card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
if (!card->ports[0].netdev || !card->ports[1].netdev) {
pr_err("unable to allocate memory\n");
pci200_pci_remove_one(pdev);
return -ENOMEM;
}
if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
pr_err("invalid card EEPROM parameters\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
/* Reset PLX */
p = &card->plxbase->init_ctrl;
writel(readl(p) | 0x40000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
writel(readl(p) & ~0x40000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
ramsize = sca_detect_ram(card, card->rambase,
pci_resource_len(pdev, 3));
/* number of TX + RX buffers for one port - this is dual port card */
i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
card->rx_ring_buffers = i - card->tx_ring_buffers;
card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
card->rx_ring_buffers);
pr_info("%u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
ramsize / 1024, ramphys,
pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
pr_err("RAM test failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
}
/* Enable interrupts on the PCI bridge */
p = &card->plxbase->intr_ctrl_stat;
writew(readw(p) | 0x0040, p);
/* Allocate IRQ */
if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
pr_warn("could not allocate IRQ%d\n", pdev->irq);
pci200_pci_remove_one(pdev);
return -EBUSY;
}
card->irq = pdev->irq;
sca_init(card, 0);
for (i = 0; i < 2; i++) {
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
port->chan = i;
spin_lock_init(&port->lock);
dev->irq = card->irq;
dev->mem_start = ramphys;
dev->mem_end = ramphys + ramsize - 1;
dev->tx_queue_len = 50;
dev->netdev_ops = &pci200_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
sca_init_port(port);
if (register_hdlc_device(dev)) {
pr_err("unable to register hdlc device\n");
port->card = NULL;
pci200_pci_remove_one(pdev);
return -ENOBUFS;
}
netdev_info(dev, "PCI200SYN channel %d\n", port->chan);
}
sca_flush(card);
return 0;
}
static const struct pci_device_id pci200_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
static struct pci_driver pci200_pci_driver = {
.name = "PCI200SYN",
.id_table = pci200_pci_tbl,
.probe = pci200_pci_init_one,
.remove = pci200_pci_remove_one,
};
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
pr_err("Invalid PCI clock frequency\n");
return -EINVAL;
}
return pci_register_driver(&pci200_pci_driver);
}
static void __exit pci200_cleanup_module(void)
{
pci_unregister_driver(&pci200_pci_driver);
}
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
module_param(pci_clock_freq, int, 0444);
MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
module_init(pci200_init_module);
module_exit(pci200_cleanup_module);
| linux-master | drivers/net/wan/pci200syn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* drivers/net/wan/slic_ds26522.c
*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
*
* Author:Zhao Qiang<[email protected]>
*/
#include <linux/bitrev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/spi/spi.h>
#include <linux/wait.h>
#include <linux/param.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include "slic_ds26522.h"
#define SLIC_TRANS_LEN 1
#define SLIC_TWO_LEN 2
#define SLIC_THREE_LEN 3
static struct spi_device *g_spi;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Zhao Qiang<[email protected]>");
/* the read/write format of address is
* w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x
*/
static void slic_write(struct spi_device *spi, u16 addr,
u8 data)
{
u8 temp[3];
addr = bitrev16(addr) >> 1;
data = bitrev8(data);
temp[0] = (u8)((addr >> 8) & 0x7f);
temp[1] = (u8)(addr & 0xfe);
temp[2] = data;
/* write spi addr and value */
spi_write(spi, &temp[0], SLIC_THREE_LEN);
}
static u8 slic_read(struct spi_device *spi, u16 addr)
{
u8 temp[2];
u8 data;
addr = bitrev16(addr) >> 1;
temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80);
temp[1] = (u8)(addr & 0xfe);
spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data,
SLIC_TRANS_LEN);
data = bitrev8(data);
return data;
}
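/* Note on the framing implemented above: the top bit of the first byte
* on the wire is the read/write flag from the address-format comment --
* slic_write() clears it (& 0x7f) while slic_read() sets it (| 0x80) --
* and both the address and the data byte are bit-reversed before being
* transferred.
*/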
static bool get_slic_product_code(struct spi_device *spi)
{
u8 device_id;
device_id = slic_read(spi, DS26522_IDR_ADDR);
if ((device_id & 0xf8) == 0x68)
return true;
else
return false;
}
static void ds26522_e1_spec_config(struct spi_device *spi)
{
/* Receive E1 Mode, Framer Disabled */
slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1);
/* Transmit E1 Mode, Framer Disable */
slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1);
/* Receive E1 Mode Framer Enable */
slic_write(spi, DS26522_RMMR_ADDR,
slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN);
/* Transmit E1 Mode Framer Enable */
slic_write(spi, DS26522_TMMR_ADDR,
slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN);
/* RCR1, receive E1 HDB3 & CCS */
slic_write(spi, DS26522_RCR1_ADDR,
DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS);
/* RSYSCLK=2.048MHz, RSYNC-Output */
slic_write(spi, DS26522_RIOCR_ADDR,
DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT);
/* TCR1 Transmit E1 b8zs */
slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS);
/* TSYSCLK=2.048MHz, TSYNC-Output */
slic_write(spi, DS26522_TIOCR_ADDR,
DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT);
/* Set E1TAF */
slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT);
/* Set E1TNAF register */
slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT);
/* Receive E1 Mode Framer Enable & init Done */
slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) |
DS26522_RMMR_INIT_DONE);
/* Transmit E1 Mode Framer Enable & init Done */
slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) |
DS26522_TMMR_INIT_DONE);
/* Configure LIU E1 mode */
slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1);
/* E1 Mode default 75 ohm w/Transmit Impedance Matching */
slic_write(spi, DS26522_LTITSR_ADDR,
DS26522_LTITSR_TLIS_75OHM | DS26522_LTITSR_LBOS_75OHM);
/* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */
slic_write(spi, DS26522_LRISMR_ADDR,
DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX);
/* Enable Transmit output */
slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE);
}
static int slic_ds26522_init_configure(struct spi_device *spi)
{
u16 addr;
/* set clock */
slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN |
DS26522_GTCCR_BFREQSEL_2048KHZ |
DS26522_GTCCR_FREQSEL_2048KHZ);
slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT);
slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ);
/* set gtcr */
slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1);
/* Global LIU Software Reset Register */
slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET);
/* Global Framer and BERT Software Reset Register */
slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET);
usleep_range(100, 120);
slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL);
slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL);
/* Perform RX/TX SRESET, reset receiver */
slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST);
/* Reset transceiver */
slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST);
usleep_range(100, 120);
/* Zero all Framer Registers */
for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END;
addr++)
slic_write(spi, addr, 0);
for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END;
addr++)
slic_write(spi, addr, 0);
for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END;
addr++)
slic_write(spi, addr, 0);
for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END;
addr++)
slic_write(spi, addr, 0);
/* setup ds26522 for E1 specification */
ds26522_e1_spec_config(spi);
slic_write(spi, DS26522_GTCR1_ADDR, 0x00);
return 0;
}
static void slic_ds26522_remove(struct spi_device *spi)
{
pr_info("DS26522 module uninstalled\n");
}
static int slic_ds26522_probe(struct spi_device *spi)
{
int ret = 0;
g_spi = spi;
spi->bits_per_word = 8;
if (!get_slic_product_code(spi))
return ret;
ret = slic_ds26522_init_configure(spi);
if (ret == 0)
pr_info("DS26522 cs%d configured\n", spi_get_chipselect(spi, 0));
return ret;
}
static const struct spi_device_id slic_ds26522_id[] = {
{ .name = "ds26522" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
.name = "ds26522",
.bus = &spi_bus_type,
.of_match_table = slic_ds26522_match,
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
.id_table = slic_ds26522_id,
};
module_spi_driver(slic_ds26522_driver);
| linux-master | drivers/net/wan/slic_ds26522.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hitachi (now Renesas) SCA-II HD64572 driver for Linux
*
* Copyright (C) 1998-2008 Krzysztof Halasa <[email protected]>
*
* Source of information: HD64572 SCA-II User's Manual
*
* We use the following SCA memory map:
*
* Packet buffer descriptor rings - starting from card->rambase:
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
*
* Packet data buffers - starting from card->rambase + buff_offset:
* rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
* rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "hd64572.h"
#define NAPI_WEIGHT 16
#define get_msci(port) ((port)->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) ((port)->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) ((port)->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
#define sca_in(reg, card) readb((card)->scabase + (reg))
#define sca_out(value, reg, card) writeb(value, (card)->scabase + (reg))
#define sca_inw(reg, card) readw((card)->scabase + (reg))
#define sca_outw(value, reg, card) writew(value, (card)->scabase + (reg))
#define sca_inl(reg, card) readl((card)->scabase + (reg))
#define sca_outl(value, reg, card) writel(value, (card)->scabase + (reg))
static int sca_poll(struct napi_struct *napi, int budget);
static inline port_t *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
static inline void enable_intr(port_t *port)
{
/* enable DMIB and MSCI RXINTA interrupts */
sca_outl(sca_inl(IER0, port->card) |
(port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
}
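/* mask off all IER0 interrupt sources belonging to this channel */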
static inline void disable_intr(port_t *port)
{
sca_outl(sca_inl(IER0, port->card) &
(port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
}
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
u16 rx_buffs = port->card->rx_ring_buffers;
u16 tx_buffs = port->card->tx_ring_buffers;
desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
}
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
return (pkt_desc __iomem *)(port->card->rambase +
desc_offset(port, desc, transmit));
}
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port->card->buff_offset +
desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
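/* Worked example of the memory map described in the header comment above
* (illustrative): with R RX and T TX buffers per channel, TX descriptor n
* of channel #1 has absolute index (R + T) + R + n, so its descriptor
* lives at that index * sizeof(pkt_desc) from rambase and its data buffer
* at buff_offset + that index * HDLC_MAX_MRU.
*/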
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier on\n",
port->netdev->name);
#endif
netif_carrier_on(port->netdev);
} else {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier off\n",
port->netdev->name);
#endif
netif_carrier_off(port->netdev);
}
}
static void sca_init_port(port_t *port)
{
card_t *card = port->card;
u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
int transmit, i;
port->rxin = 0;
port->txin = 0;
port->txlast = 0;
for (transmit = 0; transmit < 2; transmit++) {
u16 buffs = transmit ? card->tx_ring_buffers
: card->rx_ring_buffers;
for (i = 0; i < buffs; i++) {
pkt_desc __iomem *desc = desc_address(port, i, transmit);
u16 chain_off = desc_offset(port, i + 1, transmit);
u32 buff_off = buffer_offset(port, i, transmit);
writel(chain_off, &desc->cp);
writel(buff_off, &desc->bp);
writew(0, &desc->len);
writeb(0, &desc->stat);
}
}
/* DMA disable - to halt state */
sca_out(0, DSR_RX(port->chan), card);
sca_out(0, DSR_TX(port->chan), card);
/* software ABORT - to initial state */
sca_out(DCR_ABORT, DCR_RX(port->chan), card);
sca_out(DCR_ABORT, DCR_TX(port->chan), card);
/* current desc addr */
sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
dmac_rx + EDAL, card);
sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
/* clear frame end interrupt counter */
sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);
/* Receive */
sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */
/* Transmit */
sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
sca_set_carrier(port);
netif_napi_add_weight(port->netdev, &port->napi, sca_poll,
NAPI_WEIGHT);
}
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
u16 msci = get_msci(port);
card_t *card = port->card;
if (sca_in(msci + ST1, card) & ST1_CDCD) {
/* Reset MSCI CDCD status bit */
sca_out(ST1_CDCD, msci + ST1, card);
sca_set_carrier(port);
}
}
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
u16 rxin)
{
struct net_device *dev = port->netdev;
struct sk_buff *skb;
u16 len;
u32 buff;
len = readw(&desc->len);
skb = dev_alloc_skb(len);
if (!skb) {
dev->stats.rx_dropped++;
return;
}
buff = buffer_offset(port, rxin, 0);
memcpy_fromio(skb->data, card->rambase + buff, len);
skb_put(skb, len);
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->protocol = hdlc_type_trans(skb, dev);
netif_receive_skb(skb);
}
/* Receive DMA service */
static inline int sca_rx_done(port_t *port, int budget)
{
struct net_device *dev = port->netdev;
u16 dmac = get_dmac_rx(port);
card_t *card = port->card;
u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
int received = 0;
/* Reset DSR status bits */
sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
DSR_RX(port->chan), card);
if (stat & DSR_BOF)
/* Dropped one or more frames */
dev->stats.rx_over_errors++;
while (received < budget) {
u32 desc_off = desc_offset(port, port->rxin, 0);
pkt_desc __iomem *desc;
u32 cda = sca_inl(dmac + CDAL, card);
if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
break; /* No frame received */
desc = desc_address(port, port->rxin, 0);
stat = readb(&desc->stat);
if (!(stat & ST_RX_EOM))
port->rxpart = 1; /* partial frame received */
else if ((stat & ST_ERROR_MASK) || port->rxpart) {
dev->stats.rx_errors++;
if (stat & ST_RX_OVERRUN)
dev->stats.rx_fifo_errors++;
else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
ST_RX_RESBIT)) || port->rxpart)
dev->stats.rx_frame_errors++;
else if (stat & ST_RX_CRC)
dev->stats.rx_crc_errors++;
if (stat & ST_RX_EOM)
port->rxpart = 0; /* received last fragment */
} else {
sca_rx(card, port, desc, port->rxin);
received++;
}
/* Set new error descriptor address */
sca_outl(desc_off, dmac + EDAL, card);
port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
}
/* make sure RX DMA is enabled */
sca_out(DSR_DE, DSR_RX(port->chan), card);
return received;
}
/* Transmit DMA service */
static inline void sca_tx_done(port_t *port)
{
struct net_device *dev = port->netdev;
card_t *card = port->card;
u8 stat;
unsigned count = 0;
spin_lock(&port->lock);
stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
/* Reset DSR status bits */
sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
DSR_TX(port->chan), card);
while (1) {
pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
u8 stat = readb(&desc->stat);
if (!(stat & ST_TX_OWNRSHP))
break; /* not yet transmitted */
if (stat & ST_TX_UNDRRUN) {
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
} else {
dev->stats.tx_packets++;
dev->stats.tx_bytes += readw(&desc->len);
}
writeb(0, &desc->stat); /* Free descriptor */
count++;
port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
}
if (count)
netif_wake_queue(dev);
spin_unlock(&port->lock);
}
static int sca_poll(struct napi_struct *napi, int budget)
{
port_t *port = container_of(napi, port_t, napi);
u32 isr0 = sca_inl(ISR0, port->card);
int received = 0;
if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
sca_msci_intr(port);
if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
sca_tx_done(port);
if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
received = sca_rx_done(port, budget);
if (received < budget) {
napi_complete_done(napi, received);
enable_intr(port);
}
return received;
}
static irqreturn_t sca_intr(int irq, void *dev_id)
{
card_t *card = dev_id;
u32 isr0 = sca_inl(ISR0, card);
int i, handled = 0;
for (i = 0; i < 2; i++) {
port_t *port = get_port(card, i);
if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
handled = 1;
disable_intr(port);
napi_schedule(&port->napi);
}
}
return IRQ_RETVAL(handled);
}
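/* sca_set_port() below programs the MSCI baud-rate generator using
* Baud Rate = CLOCK_BASE / TMC / 2^BR (a TMC register value of 0 means
* 256). Worked example (illustrative, assuming the default 33 MHz
* pci_clock_freq): a requested clock_rate of 64000 bps ends the search at
* BR=1 with TMC clamped to 256, so the achieved rate reported back is
* 33000000 / 2 / 256 = 64453 bps.
*/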
static void sca_set_port(port_t *port)
{
card_t *card = port->card;
u16 msci = get_msci(port);
u8 md2 = sca_in(msci + MD2, card);
unsigned int tmc, br = 10, brv = 1024;
if (port->settings.clock_rate > 0) {
/* Try lower br for better accuracy*/
do {
br--;
brv >>= 1; /* brv = 2^9 = 512 max in specs */
/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
tmc = CLOCK_BASE / brv / port->settings.clock_rate;
} while (br > 1 && tmc <= 128);
if (tmc < 1) {
tmc = 1;
br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
brv = 1;
} else if (tmc > 255) {
tmc = 256; /* tmc=0 means 256 - low baud rates */
}
port->settings.clock_rate = CLOCK_BASE / brv / tmc;
} else {
br = 9; /* Minimum clock rate */
tmc = 256; /* 8bit = 0 */
port->settings.clock_rate = CLOCK_BASE / (256 * 512);
}
port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
port->txs = (port->txs & ~CLK_BRG_MASK) | br;
port->tmc = tmc;
/* baud divisor - time constant*/
sca_out(port->tmc, msci + TMCR, card);
sca_out(port->tmc, msci + TMCT, card);
/* Set BRG bits */
sca_out(port->rxs, msci + RXS, card);
sca_out(port->txs, msci + TXS, card);
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
else
md2 &= ~MD2_LOOPBACK;
sca_out(md2, msci + MD2, card);
}
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port->card;
u16 msci = get_msci(port);
u8 md0, md2;
switch (port->encoding) {
case ENCODING_NRZ:
md2 = MD2_NRZ;
break;
case ENCODING_NRZI:
md2 = MD2_NRZI;
break;
case ENCODING_FM_MARK:
md2 = MD2_FM_MARK;
break;
case ENCODING_FM_SPACE:
md2 = MD2_FM_SPACE;
break;
default:
md2 = MD2_MANCHESTER;
}
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
switch (port->parity) {
case PARITY_CRC16_PR0:
md0 = MD0_HDLC | MD0_CRC_16_0;
break;
case PARITY_CRC16_PR1:
md0 = MD0_HDLC | MD0_CRC_16;
break;
case PARITY_CRC32_PR1_CCITT:
md0 = MD0_HDLC | MD0_CRC_ITU32;
break;
case PARITY_CRC16_PR1_CCITT:
md0 = MD0_HDLC | MD0_CRC_ITU;
break;
default:
md0 = MD0_HDLC | MD0_CRC_NONE;
}
sca_out(CMD_RESET, msci + CMD, card);
sca_out(md0, msci + MD0, card);
sca_out(0x00, msci + MD1, card); /* no address field check */
sca_out(md2, msci + MD2, card);
sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
/* Skip the rest of underrun frame */
sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
/* We're using the following interrupts:
- RXINTA (DCD changes only)
- DMIB (EOM - single frame transfer complete)
*/
sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);
sca_out(port->tmc, msci + TMCR, card);
sca_out(port->tmc, msci + TMCT, card);
sca_out(port->rxs, msci + RXS, card);
sca_out(port->txs, msci + TXS, card);
sca_out(CMD_TX_ENABLE, msci + CMD, card);
sca_out(CMD_RX_ENABLE, msci + CMD, card);
sca_set_carrier(port);
enable_intr(port);
napi_enable(&port->napi);
netif_start_queue(dev);
}
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
/* reset channel */
sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
disable_intr(port);
napi_disable(&port->napi);
netif_stop_queue(dev);
}
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
if (encoding != ENCODING_NRZ &&
encoding != ENCODING_NRZI &&
encoding != ENCODING_FM_MARK &&
encoding != ENCODING_FM_SPACE &&
encoding != ENCODING_MANCHESTER)
return -EINVAL;
if (parity != PARITY_NONE &&
parity != PARITY_CRC16_PR0 &&
parity != PARITY_CRC16_PR1 &&
parity != PARITY_CRC32_PR1_CCITT &&
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
dev_to_port(dev)->encoding = encoding;
dev_to_port(dev)->parity = parity;
return 0;
}
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port->card;
u16 cnt;
printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
sca_inl(get_dmac_rx(port) + CDAL, card),
sca_inl(get_dmac_rx(port) + EDAL, card),
sca_in(DSR_RX(port->chan), card), port->rxin,
sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
pr_cont("\n");
printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
sca_inl(get_dmac_tx(port) + CDAL, card),
sca_inl(get_dmac_tx(port) + EDAL, card),
sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
pr_cont("\n");
printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
" ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
sca_in(get_msci(port) + MD0, card),
sca_in(get_msci(port) + MD1, card),
sca_in(get_msci(port) + MD2, card),
sca_in(get_msci(port) + ST0, card),
sca_in(get_msci(port) + ST1, card),
sca_in(get_msci(port) + ST2, card),
sca_in(get_msci(port) + ST3, card),
sca_in(get_msci(port) + ST4, card),
sca_in(get_msci(port) + FST, card),
sca_in(get_msci(port) + CST0, card),
sca_in(get_msci(port) + CST1, card));
printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
sca_inl(ISR0, card), sca_inl(ISR1, card));
}
#endif /* DEBUG_RINGS */
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port->card;
pkt_desc __iomem *desc;
u32 buff, len;
spin_lock_irq(&port->lock);
desc = desc_address(port, port->txin + 1, 1);
BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
desc = desc_address(port, port->txin, 1);
buff = buffer_offset(port, port->txin, 1);
len = skb->len;
memcpy_toio(card->rambase + buff, skb->data, len);
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
port->txin = (port->txin + 1) % card->tx_ring_buffers;
sca_outl(desc_offset(port, port->txin, 1),
get_dmac_tx(port) + EDAL, card);
sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
desc = desc_address(port, port->txin + 1, 1);
if (readb(&desc->stat)) /* allow 1 packet gap */
netif_stop_queue(dev);
spin_unlock_irq(&port->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
u32 i = ramsize &= ~3;
do {
i -= 4;
writel(i ^ 0x12345678, rambase + i);
} while (i > 0);
for (i = 0; i < ramsize ; i += 4) {
if (readl(rambase + i) != (i ^ 0x12345678))
break;
}
return i;
}
static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
sca_out(wait_states, WCRM, card);
sca_out(wait_states, WCRH, card);
sca_out(0, DMER, card); /* DMA Master disable */
sca_out(0x03, PCR, card); /* DMA priority */
sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
sca_out(0, DSR_TX(0), card);
sca_out(0, DSR_RX(1), card);
sca_out(0, DSR_TX(1), card);
sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}
| linux-master | drivers/net/wan/hd64572.c |
/*
* Routines to compress and uncompress tcp packets (for transmission
* over low speed serial lines).
*
* Copyright (c) 1989 Regents of the University of California.
* All rights reserved.
*
* Redistribution and use in source and binary forms are permitted
* provided that the above copyright notice and this paragraph are
* duplicated in all such forms and that any documentation,
* advertising materials, and other materials related to such
* distribution and use acknowledge that the software was developed
* by the University of California, Berkeley. The name of the
* University may not be used to endorse or promote products derived
* from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Van Jacobson ([email protected]), Dec 31, 1989:
* - Initial distribution.
*
*
* modified for KA9Q Internet Software Package by
* Katie Stevens ([email protected])
* University of California, Davis
* Computing Services
* - 01-31-90 initial adaptation (from 1.19)
* PPP.05 02-15-90 [ks]
* PPP.08 05-02-90 [ks] use PPP protocol field to signal compression
* PPP.15 09-90 [ks] improve mbuf handling
* PPP.16 11-02 [karn] substantially rewritten to use NOS facilities
*
* - Feb 1991 [email protected]
* variable number of conversation slots
* allow zero or one slots
* separate routines
* status display
* - Jul 1994 Dmitry Gorodchanin
* Fixes for memory leaks.
* - Oct 1994 Dmitry Gorodchanin
* Modularization.
* - Jan 1995 Bjorn Ekwall
* Use ip_fast_csum from ip.h
* - July 1995 Christos A. Polyzols
* Spotted bug in tcp option checking
*
*
* This module is a difficult issue. It's clearly inet code but it's also clearly
* driver code belonging close to PPP and SLIP
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <net/slhc_vj.h>
#ifdef CONFIG_INET
/* Entire module is for IP only */
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/termios.h>
#include <linux/in.h>
#include <linux/fcntl.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
static unsigned char *encode(unsigned char *cp, unsigned short n);
static long decode(unsigned char **cpp);
static unsigned char * put16(unsigned char *cp, unsigned short x);
static unsigned short pull16(unsigned char **cpp);
/* Allocate compression data structure
* slots must be in range 0 to 255 (zero meaning no compression)
* Returns pointer to structure or ERR_PTR() on error.
*/
struct slcompress *
slhc_init(int rslots, int tslots)
{
short i;
struct cstate *ts;
struct slcompress *comp;
if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
return ERR_PTR(-EINVAL);
comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
if (! comp)
goto out_fail;
if (rslots > 0) {
size_t rsize = rslots * sizeof(struct cstate);
comp->rstate = kzalloc(rsize, GFP_KERNEL);
if (! comp->rstate)
goto out_free;
comp->rslot_limit = rslots - 1;
}
if (tslots > 0) {
size_t tsize = tslots * sizeof(struct cstate);
comp->tstate = kzalloc(tsize, GFP_KERNEL);
if (! comp->tstate)
goto out_free2;
comp->tslot_limit = tslots - 1;
}
comp->xmit_oldest = 0;
comp->xmit_current = 255;
comp->recv_current = 255;
/*
* don't accept any packets with implicit index until we get
* one with an explicit index. Otherwise the uncompress code
* will try to use connection 255, which is almost certainly
* out of range
*/
comp->flags |= SLF_TOSS;
if ( tslots > 0 ) {
ts = comp->tstate;
for(i = comp->tslot_limit; i > 0; --i){
ts[i].cs_this = i;
ts[i].next = &(ts[i - 1]);
}
ts[0].next = &(ts[comp->tslot_limit]);
ts[0].cs_this = 0;
}
return comp;
out_free2:
kfree(comp->rstate);
out_free:
kfree(comp);
out_fail:
return ERR_PTR(-ENOMEM);
}
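/* Typical usage (as in the SLIP driver): comp = slhc_init(16, 16);
* the result must be checked with IS_ERR(), not compared against NULL.
*/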
/* Free a compression data structure */
void
slhc_free(struct slcompress *comp)
{
if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )
kfree( comp->tstate );
if ( comp->rstate != NULLSLSTATE )
kfree( comp->rstate );
kfree( comp );
}
/* Put a short in host order into a char array in network order */
static inline unsigned char *
put16(unsigned char *cp, unsigned short x)
{
*cp++ = x >> 8;
*cp++ = x;
return cp;
}
/* Encode a number */
static unsigned char *
encode(unsigned char *cp, unsigned short n)
{
if(n >= 256 || n == 0){
*cp++ = 0;
cp = put16(cp,n);
} else {
*cp++ = n;
}
return cp;
}
/* Pull a 16-bit integer in host order from buffer in network byte order */
static unsigned short
pull16(unsigned char **cpp)
{
short rval;
rval = *(*cpp)++;
rval <<= 8;
rval |= *(*cpp)++;
return rval;
}
/* Decode a number */
static long
decode(unsigned char **cpp)
{
int x;
x = *(*cpp)++;
if(x == 0){
return pull16(cpp) & 0xffff; /* pull16 returns -1 on error */
} else {
return x & 0xff; /* -1 if PULLCHAR returned error */
}
}
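/* Worked example of the delta encoding above (illustrative): values
* 1..255 are emitted as a single byte, while 0 and values >= 256 are
* emitted as a zero byte followed by the 16-bit value in network byte
* order. So encode(5) produces {0x05}, encode(300) produces
* {0x00, 0x01, 0x2c}, and decode() reverses both cases.
*/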
/*
* icp and isize are the original packet.
* ocp is a place to put a copy if necessary.
* cpp is initially a pointer to icp. If the copy is used,
* change it to ocp.
*/
int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
struct cstate *lcs = ocs;
struct cstate *cs = lcs->next;
unsigned long deltaS, deltaA;
short changes = 0;
int nlen, hlen;
unsigned char new_seq[16];
unsigned char *cp = new_seq;
struct iphdr *ip;
struct tcphdr *th, *oth;
__sum16 csum;
/*
* Don't play with runt packets.
*/
if(isize<sizeof(struct iphdr))
return isize;
ip = (struct iphdr *) icp;
if (ip->version != 4 || ip->ihl < 5)
return isize;
/* Bail if this packet isn't TCP, or is an IP fragment */
if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
/* Send as regular IP */
if(ip->protocol != IPPROTO_TCP)
comp->sls_o_nontcp++;
else
comp->sls_o_tcp++;
return isize;
}
nlen = ip->ihl * 4;
if (isize < nlen + sizeof(*th))
return isize;
th = (struct tcphdr *)(icp + nlen);
if (th->doff < sizeof(struct tcphdr) / 4)
return isize;
hlen = nlen + th->doff * 4;
/* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
* some other control bit is set). Also uncompressible if
* it's a runt.
*/
if(hlen > isize || th->syn || th->fin || th->rst ||
! (th->ack)){
/* TCP connection stuff; send as regular IP */
comp->sls_o_tcp++;
return isize;
}
/*
* Packet is compressible -- we're going to send either a
* COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way,
* we need to locate (or create) the connection state.
*
* States are kept in a circularly linked list with
* xmit_oldest pointing to the end of the list. The
* list is kept in lru order by moving a state to the
* head of the list whenever it is referenced. Since
* the list is short and, empirically, the connection
* we want is almost always near the front, we locate
* states via linear search. If we don't find a state
* for the datagram, the oldest state is (re-)used.
*/
for ( ; ; ) {
if( ip->saddr == cs->cs_ip.saddr
&& ip->daddr == cs->cs_ip.daddr
&& th->source == cs->cs_tcp.source
&& th->dest == cs->cs_tcp.dest)
goto found;
/* if current equal oldest, at end of list */
if ( cs == ocs )
break;
lcs = cs;
cs = cs->next;
comp->sls_o_searches++;
}
/*
* Didn't find it -- re-use oldest cstate. Send an
* uncompressed packet that tells the other side what
* connection number we're using for this conversation.
*
* Note that since the state list is circular, the oldest
* state points to the newest and we only need to set
* xmit_oldest to update the lru linkage.
*/
comp->sls_o_misses++;
comp->xmit_oldest = lcs->cs_this;
goto uncompressed;
found:
/*
* Found it -- move to the front on the connection list.
*/
if(lcs == ocs) {
/* found at most recently used */
} else if (cs == ocs) {
/* found at least recently used */
comp->xmit_oldest = lcs->cs_this;
} else {
/* more than 2 elements */
lcs->next = cs->next;
cs->next = ocs->next;
ocs->next = cs;
}
/*
* Make sure that only what we expect to change changed.
* Check the following:
* IP protocol version, header length & type of service.
* The "Don't fragment" bit.
* The time-to-live field.
* The TCP header length.
* IP options, if any.
* TCP options, if any.
* If any of these things are different between the previous &
* current datagram, we send the current datagram `uncompressed'.
*/
oth = &cs->cs_tcp;
if(ip->version != cs->cs_ip.version || ip->ihl != cs->cs_ip.ihl
|| ip->tos != cs->cs_ip.tos
|| (ip->frag_off & htons(0x4000)) != (cs->cs_ip.frag_off & htons(0x4000))
|| ip->ttl != cs->cs_ip.ttl
|| th->doff != cs->cs_tcp.doff
|| (ip->ihl > 5 && memcmp(ip+1,cs->cs_ipopt,((ip->ihl)-5)*4) != 0)
|| (th->doff > 5 && memcmp(th+1,cs->cs_tcpopt,((th->doff)-5)*4) != 0)){
goto uncompressed;
}
/*
* Figure out which of the changing fields changed. The
* receiver expects changes in the order: urgent, window,
* ack, seq (the order minimizes the number of temporaries
* needed in this section of code).
*/
if(th->urg){
deltaS = ntohs(th->urg_ptr);
cp = encode(cp,deltaS);
changes |= NEW_U;
} else if(th->urg_ptr != oth->urg_ptr){
/* argh! URG not set but urp changed -- a sensible
* implementation should never do this but RFC793
* doesn't prohibit the change so we have to deal
* with it. */
goto uncompressed;
}
if((deltaS = ntohs(th->window) - ntohs(oth->window)) != 0){
cp = encode(cp,deltaS);
changes |= NEW_W;
}
if((deltaA = ntohl(th->ack_seq) - ntohl(oth->ack_seq)) != 0L){
if(deltaA > 0x0000ffff)
goto uncompressed;
cp = encode(cp,deltaA);
changes |= NEW_A;
}
if((deltaS = ntohl(th->seq) - ntohl(oth->seq)) != 0L){
if(deltaS > 0x0000ffff)
goto uncompressed;
cp = encode(cp,deltaS);
changes |= NEW_S;
}
switch(changes){
case 0: /* Nothing changed. If this packet contains data and the
* last one didn't, this is probably a data packet following
* an ack (normal on an interactive connection) and we send
* it compressed. Otherwise it's probably a retransmit,
* retransmitted ack or window probe. Send it uncompressed
* in case the other side missed the compressed version.
*/
if(ip->tot_len != cs->cs_ip.tot_len &&
ntohs(cs->cs_ip.tot_len) == hlen)
break;
goto uncompressed;
case SPECIAL_I:
case SPECIAL_D:
/* actual changes match one of our special case encodings --
* send packet uncompressed.
*/
goto uncompressed;
case NEW_S|NEW_A:
if(deltaS == deltaA &&
deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
/* special case for echoed terminal traffic */
changes = SPECIAL_I;
cp = new_seq;
}
break;
case NEW_S:
if(deltaS == ntohs(cs->cs_ip.tot_len) - hlen){
/* special case for data xfer */
changes = SPECIAL_D;
cp = new_seq;
}
break;
}
deltaS = ntohs(ip->id) - ntohs(cs->cs_ip.id);
if(deltaS != 1){
cp = encode(cp,deltaS);
changes |= NEW_I;
}
if(th->psh)
changes |= TCP_PUSH_BIT;
/* Grab the cksum before we overwrite it below. Then update our
* state with this packet's header.
*/
csum = th->check;
memcpy(&cs->cs_ip,ip,20);
memcpy(&cs->cs_tcp,th,20);
/* We want to use the original packet as our compressed packet.
* (cp - new_seq) is the number of bytes we need for compressed
* sequence numbers. In addition we need one byte for the change
* mask, one for the connection id and two for the tcp checksum.
* So, (cp - new_seq) + 4 bytes of header are needed.
*/
deltaS = cp - new_seq;
if(compress_cid == 0 || comp->xmit_current != cs->cs_this){
cp = ocp;
*cpp = ocp;
*cp++ = changes | NEW_C;
*cp++ = cs->cs_this;
comp->xmit_current = cs->cs_this;
} else {
cp = ocp;
*cpp = ocp;
*cp++ = changes;
}
*(__sum16 *)cp = csum;
cp += 2;
/* deltaS is now the size of the change section of the compressed header */
memcpy(cp,new_seq,deltaS); /* Write list of deltas */
memcpy(cp+deltaS,icp+hlen,isize-hlen);
comp->sls_o_compressed++;
ocp[0] |= SL_TYPE_COMPRESSED_TCP;
return isize - hlen + deltaS + (cp - ocp);
/* Update connection state cs & send uncompressed packet (i.e.,
* a regular ip/tcp packet but with the 'conversation id' we hope
* to use on future compressed packets in the protocol field).
*/
uncompressed:
memcpy(&cs->cs_ip,ip,20);
memcpy(&cs->cs_tcp,th,20);
if (ip->ihl > 5)
memcpy(cs->cs_ipopt, ip+1, ((ip->ihl) - 5) * 4);
if (th->doff > 5)
memcpy(cs->cs_tcpopt, th+1, ((th->doff) - 5) * 4);
comp->xmit_current = cs->cs_this;
comp->sls_o_uncompressed++;
memcpy(ocp, icp, isize);
*cpp = ocp;
ocp[9] = cs->cs_this;
ocp[0] |= SL_TYPE_UNCOMPRESSED_TCP;
return isize;
}
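/* Both exit paths above tag the first byte of the outgoing frame
* (SL_TYPE_COMPRESSED_TCP or SL_TYPE_UNCOMPRESSED_TCP), point *cpp at the
* buffer to transmit and return its length; the uncompressed path
* additionally stores the connection id in the IP protocol field (ocp[9]).
*/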
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
int changes;
long x;
struct tcphdr *thp;
struct iphdr *ip;
struct cstate *cs;
int len, hdrlen;
unsigned char *cp = icp;
/* We've got a compressed packet; read the change byte */
comp->sls_i_compressed++;
if(isize < 3){
comp->sls_i_error++;
return 0;
}
changes = *cp++;
if(changes & NEW_C){
/* Make sure the state index is in range, then grab the state.
* If we have a good state index, clear the 'discard' flag.
*/
x = *cp++; /* Read conn index */
if(x < 0 || x > comp->rslot_limit)
goto bad;
/* Check if the cstate is initialized */
if (!comp->rstate[x].initialized)
goto bad;
comp->flags &=~ SLF_TOSS;
comp->recv_current = x;
} else {
/* this packet has an implicit state index. If we've
* had a line error since the last time we got an
* explicit state index, we have to toss the packet. */
if(comp->flags & SLF_TOSS){
comp->sls_i_tossed++;
return 0;
}
}
cs = &comp->rstate[comp->recv_current];
thp = &cs->cs_tcp;
ip = &cs->cs_ip;
thp->check = *(__sum16 *)cp;
cp += 2;
thp->psh = (changes & TCP_PUSH_BIT) ? 1 : 0;
/*
* we can use the same number for the length of the saved header and
* the current one, because the packet wouldn't have been sent
* as compressed unless the options were the same as the previous one
*/
hdrlen = ip->ihl * 4 + thp->doff * 4;
switch(changes & SPECIALS_MASK){
case SPECIAL_I: /* Echoed terminal traffic */
{
short i;
i = ntohs(ip->tot_len) - hdrlen;
thp->ack_seq = htonl( ntohl(thp->ack_seq) + i);
thp->seq = htonl( ntohl(thp->seq) + i);
}
break;
case SPECIAL_D: /* Unidirectional data */
thp->seq = htonl( ntohl(thp->seq) +
ntohs(ip->tot_len) - hdrlen);
break;
default:
if(changes & NEW_U){
thp->urg = 1;
if((x = decode(&cp)) == -1) {
goto bad;
}
thp->urg_ptr = htons(x);
} else
thp->urg = 0;
if(changes & NEW_W){
if((x = decode(&cp)) == -1) {
goto bad;
}
thp->window = htons( ntohs(thp->window) + x);
}
if(changes & NEW_A){
if((x = decode(&cp)) == -1) {
goto bad;
}
thp->ack_seq = htonl( ntohl(thp->ack_seq) + x);
}
if(changes & NEW_S){
if((x = decode(&cp)) == -1) {
goto bad;
}
thp->seq = htonl( ntohl(thp->seq) + x);
}
break;
}
if(changes & NEW_I){
if((x = decode(&cp)) == -1) {
goto bad;
}
ip->id = htons (ntohs (ip->id) + x);
} else
ip->id = htons (ntohs (ip->id) + 1);
/*
* At this point, cp points to the first byte of data in the
* packet. Put the reconstructed TCP and IP headers back on the
* packet. Recalculate IP checksum (but not TCP checksum).
*/
len = isize - (cp - icp);
if (len < 0)
goto bad;
len += hdrlen;
ip->tot_len = htons(len);
ip->check = 0;
memmove(icp + hdrlen, cp, len - hdrlen);
cp = icp;
memcpy(cp, ip, 20);
cp += 20;
if (ip->ihl > 5) {
memcpy(cp, cs->cs_ipopt, (ip->ihl - 5) * 4);
cp += (ip->ihl - 5) * 4;
}
put_unaligned(ip_fast_csum(icp, ip->ihl),
&((struct iphdr *)icp)->check);
memcpy(cp, thp, 20);
cp += 20;
if (thp->doff > 5) {
memcpy(cp, cs->cs_tcpopt, ((thp->doff) - 5) * 4);
cp += ((thp->doff) - 5) * 4;
}
return len;
bad:
comp->sls_i_error++;
return slhc_toss( comp );
}
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
struct cstate *cs;
unsigned ihl;
unsigned char index;
if(isize < 20) {
/* The packet is shorter than a legal IP header */
comp->sls_i_runt++;
return slhc_toss( comp );
}
/* Peek at the IP header's IHL field to find its length */
ihl = icp[0] & 0xf;
if(ihl < 20 / 4){
/* The IP header length field is too small */
comp->sls_i_runt++;
return slhc_toss( comp );
}
index = icp[9];
icp[9] = IPPROTO_TCP;
if (ip_fast_csum(icp, ihl)) {
/* Bad IP header checksum; discard */
comp->sls_i_badcheck++;
return slhc_toss( comp );
}
if(index > comp->rslot_limit) {
comp->sls_i_error++;
return slhc_toss(comp);
}
/* Update local state */
cs = &comp->rstate[comp->recv_current = index];
comp->flags &=~ SLF_TOSS;
memcpy(&cs->cs_ip,icp,20);
memcpy(&cs->cs_tcp,icp + ihl*4,20);
if (ihl > 5)
memcpy(cs->cs_ipopt, icp + sizeof(struct iphdr), (ihl - 5) * 4);
if (cs->cs_tcp.doff > 5)
memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
cs->initialized = true;
/* Put headers back on packet
* Neither header checksum is recalculated
*/
comp->sls_i_uncompressed++;
return isize;
}
int
slhc_toss(struct slcompress *comp)
{
if ( comp == NULLSLCOMPR )
return 0;
comp->flags |= SLF_TOSS;
return 0;
}
#else /* CONFIG_INET */
int
slhc_toss(struct slcompress *comp)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_toss");
return -EINVAL;
}
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_uncompress");
return -EINVAL;
}
int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_compress");
return -EINVAL;
}
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_remember");
return -EINVAL;
}
void
slhc_free(struct slcompress *comp)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
}
struct slcompress *
slhc_init(int rslots, int tslots)
{
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
#endif /* CONFIG_INET */
/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/net/slip/slhc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* slip.c This module implements the SLIP protocol for kernel-based
* devices like TTY. It interfaces between a raw TTY, and the
* kernel's INET protocol layers.
*
* Version: @(#)slip.c 0.8.3 12/24/94
*
* Authors: Laurence Culhane, <[email protected]>
* Fred N. van Kempen, <[email protected]>
*
* Fixes:
* Alan Cox : Sanity checks and avoid tx overruns.
* Has a new sl->mtu field.
* Alan Cox : Found cause of overrun. ifconfig sl0
* mtu upwards. Driver now spots this
* and grows/shrinks its buffers(hack!).
* Memory leak if you run out of memory
* setting up a slip driver fixed.
* Matt Dillon : Printable slip (borrowed from NET2E)
* Pauline Middelink : Slip driver fixes.
* Alan Cox : Honours the old SL_COMPRESSED flag
* Alan Cox : KISS AX.25 and AXUI IP support
* Michael Riepe : Automatic CSLIP recognition added
* Charles Hedrick : CSLIP header length problem fix.
* Alan Cox : Corrected non-IP cases of the above.
* Alan Cox : Now uses hardware type as per FvK.
* Alan Cox : Default to 192.168.0.0 (RFC 1597)
* A.N.Kuznetsov : dev_tint() recursion fix.
* Dmitry Gorodchanin : SLIP memory leaks
* Dmitry Gorodchanin : Code cleanup. Reduce tty driver
* buffering from 4096 to 256 bytes.
* Improving SLIP response time.
* CONFIG_SLIP_MODE_SLIP6.
* ifconfig sl? up & down now works
* correctly.
* Modularization.
* Alan Cox : Oops - fix AX.25 buffer lengths
* Dmitry Gorodchanin : Even more cleanups. Preserve CSLIP
* statistics. Include CSLIP code only
* if it really needed.
* Alan Cox : Free slhc buffers in the right place.
* Alan Cox : Allow for digipeated IP over AX.25
* Matti Aarnio : Dynamic SLIP devices, with ideas taken
* from Jim Freeman's <[email protected]>
* dynamic PPP devices. We do NOT kfree()
* device entries, just reg./unreg. them
* as they are needed. We kfree() them
* at module cleanup.
* With MODULE-loading ``insmod'', user
* can issue parameter: slip_maxdev=1024
* (Or how much he/she wants.. Default
* is 256)
* Stanislav Voronyi : Slip line checking, with ideas taken
* from multislip BSDI driver which was
* written by Igor Chechik, RELCOM Corp.
* Only algorithms have been ported to
* Linux SLIP driver.
* Vitaly E. Lavrov : Sane behaviour on tty hangup.
* Alexey Kuznetsov : Cleanup interfaces to tty & netdevice
* modules.
*/
#define SL_CHECK_TRANSMIT
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_slip.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "slip.h"
#ifdef CONFIG_INET
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/slhc_vj.h>
#endif
#define SLIP_VERSION "0.8.4-NET3.019-NEWTTY"
static struct net_device **slip_devs;
static int slip_maxdev = SL_NRUNIT;
module_param(slip_maxdev, int, 0);
MODULE_PARM_DESC(slip_maxdev, "Maximum number of slip devices");
static int slip_esc(unsigned char *p, unsigned char *d, int len);
static void slip_unesc(struct slip *sl, unsigned char c);
#ifdef CONFIG_SLIP_MODE_SLIP6
static int slip_esc6(unsigned char *p, unsigned char *d, int len);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
static void sl_keepalive(struct timer_list *t);
static void sl_outfill(struct timer_list *t);
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd);
#endif
/********************************
* Buffer administration routines:
* sl_alloc_bufs()
* sl_free_bufs()
* sl_realloc_bufs()
*
* NOTE: sl_realloc_bufs != sl_free_bufs + sl_alloc_bufs, because
* sl_realloc_bufs provides strong atomicity and reallocation
* on actively running device.
*********************************/
/*
Allocate channel buffers.
*/
static int sl_alloc_bufs(struct slip *sl, int mtu)
{
int err = -ENOBUFS;
unsigned long len;
char *rbuff = NULL;
char *xbuff = NULL;
#ifdef SL_INCLUDE_CSLIP
char *cbuff = NULL;
struct slcompress *slcomp = NULL;
#endif
/*
* Allocate the SLIP frame buffers:
*
* rbuff Receive buffer.
* xbuff Transmit buffer.
* cbuff Temporary compression buffer.
*/
len = mtu * 2;
/*
* allow for arrival of larger UDP packets, even if we say not to
* also fixes a bug in which SunOS sends 512-byte packets even with
* an MSS of 128
*/
if (len < 576 * 2)
len = 576 * 2;
rbuff = kmalloc(len + 4, GFP_KERNEL);
if (rbuff == NULL)
goto err_exit;
xbuff = kmalloc(len + 4, GFP_KERNEL);
if (xbuff == NULL)
goto err_exit;
#ifdef SL_INCLUDE_CSLIP
cbuff = kmalloc(len + 4, GFP_KERNEL);
if (cbuff == NULL)
goto err_exit;
slcomp = slhc_init(16, 16);
if (IS_ERR(slcomp))
goto err_exit;
#endif
spin_lock_bh(&sl->lock);
if (sl->tty == NULL) {
spin_unlock_bh(&sl->lock);
err = -ENODEV;
goto err_exit;
}
sl->mtu = mtu;
sl->buffsize = len;
sl->rcount = 0;
sl->xleft = 0;
rbuff = xchg(&sl->rbuff, rbuff);
xbuff = xchg(&sl->xbuff, xbuff);
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
slcomp = xchg(&sl->slcomp, slcomp);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
sl->xdata = 0;
sl->xbits = 0;
#endif
spin_unlock_bh(&sl->lock);
err = 0;
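	/*
	 * Note: on the success path the xchg() calls above left the *old*
	 * buffers (NULL for a fresh channel) in the local variables, so the
	 * cleanup below frees exactly what is no longer needed in both the
	 * success and the error case.
	 */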
/* Cleanup */
err_exit:
#ifdef SL_INCLUDE_CSLIP
kfree(cbuff);
slhc_free(slcomp);
#endif
kfree(xbuff);
kfree(rbuff);
return err;
}
/* Free a SLIP channel buffers. */
static void sl_free_bufs(struct slip *sl)
{
/* Free all SLIP frame buffers. */
kfree(xchg(&sl->rbuff, NULL));
kfree(xchg(&sl->xbuff, NULL));
#ifdef SL_INCLUDE_CSLIP
kfree(xchg(&sl->cbuff, NULL));
slhc_free(xchg(&sl->slcomp, NULL));
#endif
}
/*
Reallocate slip channel buffers.
*/
static int sl_realloc_bufs(struct slip *sl, int mtu)
{
int err = 0;
struct net_device *dev = sl->dev;
unsigned char *xbuff, *rbuff;
#ifdef SL_INCLUDE_CSLIP
unsigned char *cbuff;
#endif
int len = mtu * 2;
/*
* allow for arrival of larger UDP packets, even if we say not to
* also fixes a bug in which SunOS sends 512-byte packets even with
* an MSS of 128
*/
if (len < 576 * 2)
len = 576 * 2;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
#ifdef SL_INCLUDE_CSLIP
cbuff = kmalloc(len + 4, GFP_ATOMIC);
#endif
#ifdef SL_INCLUDE_CSLIP
if (xbuff == NULL || rbuff == NULL || cbuff == NULL) {
#else
if (xbuff == NULL || rbuff == NULL) {
#endif
if (mtu > sl->mtu) {
printk(KERN_WARNING "%s: unable to grow slip buffers, MTU change cancelled.\n",
dev->name);
err = -ENOBUFS;
}
goto done;
}
spin_lock_bh(&sl->lock);
err = -ENODEV;
if (sl->tty == NULL)
goto done_on_bh;
xbuff = xchg(&sl->xbuff, xbuff);
rbuff = xchg(&sl->rbuff, rbuff);
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
#endif
if (sl->xleft) {
if (sl->xleft <= len) {
memcpy(sl->xbuff, sl->xhead, sl->xleft);
} else {
sl->xleft = 0;
dev->stats.tx_dropped++;
}
}
sl->xhead = sl->xbuff;
if (sl->rcount) {
if (sl->rcount <= len) {
memcpy(sl->rbuff, rbuff, sl->rcount);
} else {
sl->rcount = 0;
dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
sl->mtu = mtu;
dev->mtu = mtu;
sl->buffsize = len;
err = 0;
done_on_bh:
spin_unlock_bh(&sl->lock);
done:
kfree(xbuff);
kfree(rbuff);
#ifdef SL_INCLUDE_CSLIP
kfree(cbuff);
#endif
return err;
}
/* Set the "sending" flag: stop the netdevice queue while a frame is transmitted. */
static inline void sl_lock(struct slip *sl)
{
netif_stop_queue(sl->dev);
}
/* Clear the "sending" flag: wake the netdevice queue once transmission completes. */
static inline void sl_unlock(struct slip *sl)
{
netif_wake_queue(sl->dev);
}
/* Send one completely decapsulated IP datagram to the IP layer. */
static void sl_bump(struct slip *sl)
{
struct net_device *dev = sl->dev;
struct sk_buff *skb;
int count;
count = sl->rcount;
#ifdef SL_INCLUDE_CSLIP
if (sl->mode & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) {
unsigned char c = sl->rbuff[0];
if (c & SL_TYPE_COMPRESSED_TCP) {
/* ignore compressed packets when CSLIP is off */
if (!(sl->mode & SL_MODE_CSLIP)) {
printk(KERN_WARNING "%s: compressed packet ignored\n", dev->name);
return;
}
/* make sure we've reserved enough space for uncompress
to use */
if (count + 80 > sl->buffsize) {
dev->stats.rx_over_errors++;
return;
}
count = slhc_uncompress(sl->slcomp, sl->rbuff, count);
if (count <= 0)
return;
} else if (c >= SL_TYPE_UNCOMPRESSED_TCP) {
if (!(sl->mode & SL_MODE_CSLIP)) {
/* turn on header compression */
sl->mode |= SL_MODE_CSLIP;
sl->mode &= ~SL_MODE_ADAPTIVE;
printk(KERN_INFO "%s: header compression turned on\n", dev->name);
}
sl->rbuff[0] &= 0x4f;
if (slhc_remember(sl->slcomp, sl->rbuff, count) <= 0)
return;
}
}
#endif /* SL_INCLUDE_CSLIP */
dev->stats.rx_bytes += count;
skb = dev_alloc_skb(count);
if (skb == NULL) {
printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
return;
}
skb->dev = dev;
skb_put_data(skb, sl->rbuff, count);
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IP);
netif_rx(skb);
dev->stats.rx_packets++;
}
/* Encapsulate one IP datagram and stuff into a TTY queue. */
static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
{
unsigned char *p;
int actual, count;
if (len > sl->mtu) { /* Sigh, shouldn't occur BUT ... */
printk(KERN_WARNING "%s: truncating oversized transmit packet!\n", sl->dev->name);
sl->dev->stats.tx_dropped++;
sl_unlock(sl);
return;
}
p = icp;
#ifdef SL_INCLUDE_CSLIP
if (sl->mode & SL_MODE_CSLIP)
len = slhc_compress(sl->slcomp, p, len, sl->cbuff, &p, 1);
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
if (sl->mode & SL_MODE_SLIP6)
count = slip_esc6(p, sl->xbuff, len);
else
#endif
count = slip_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
* the transfer may be completed inside the ops->write()
* routine, because it's running with interrupts enabled.
* In this case we *never* got WRITE_WAKEUP event,
* if we did not request it before write operation.
* 14 Oct 1994 Dmitry Gorodchanin.
*/
set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
netif_trans_update(sl->dev);
#endif
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
#ifdef CONFIG_SLIP_SMART
/* VSV */
clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
#endif
}
/* Write out any remaining transmit buffer. Scheduled when tty is writable */
static void slip_transmit(struct work_struct *work)
{
struct slip *sl = container_of(work, struct slip, tx_work);
int actual;
spin_lock_bh(&sl->lock);
/* First make sure we're connected. */
if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
spin_unlock_bh(&sl->lock);
return;
}
if (sl->xleft <= 0) {
/* Now serial buffer is almost free & we can start
* transmission of another packet */
sl->dev->stats.tx_packets++;
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
spin_unlock_bh(&sl->lock);
sl_unlock(sl);
return;
}
actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
sl->xleft -= actual;
sl->xhead += actual;
spin_unlock_bh(&sl->lock);
}
/*
* Called by the driver when there's room for more data.
* Schedule the transmit.
*/
static void slip_write_wakeup(struct tty_struct *tty)
{
struct slip *sl;
rcu_read_lock();
sl = rcu_dereference(tty->disc_data);
if (sl)
schedule_work(&sl->tx_work);
rcu_read_unlock();
}
static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct slip *sl = netdev_priv(dev);
spin_lock(&sl->lock);
if (netif_queue_stopped(dev)) {
if (!netif_running(dev) || !sl->tty)
goto out;
		/* Maybe we should check the transmitter timeout here?
		 * 14 Oct 1994 Dmitry Gorodchanin.
		 */
#ifdef SL_CHECK_TRANSMIT
if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
goto out;
}
printk(KERN_WARNING "%s: transmit timed out, %s?\n",
dev->name,
(tty_chars_in_buffer(sl->tty) || sl->xleft) ?
"bad line quality" : "driver error");
sl->xleft = 0;
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
sl_unlock(sl);
#endif
}
out:
spin_unlock(&sl->lock);
}
/* Encapsulate an IP datagram and kick it into a TTY queue. */
static netdev_tx_t
sl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
spin_lock(&sl->lock);
if (!netif_running(dev)) {
spin_unlock(&sl->lock);
printk(KERN_WARNING "%s: xmit call when iface is down\n", dev->name);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
if (sl->tty == NULL) {
spin_unlock(&sl->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
sl_lock(sl);
dev->stats.tx_bytes += skb->len;
sl_encaps(sl, skb->data, skb->len);
spin_unlock(&sl->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/******************************************
* Routines looking at netdevice side.
******************************************/
/* Netdevice UP -> DOWN routine */
static int
sl_close(struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
spin_lock_bh(&sl->lock);
if (sl->tty)
/* TTY discipline is running. */
clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
spin_unlock_bh(&sl->lock);
return 0;
}
/* Netdevice DOWN -> UP routine */
static int sl_open(struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
if (sl->tty == NULL)
return -ENODEV;
sl->flags &= (1 << SLF_INUSE);
netif_start_queue(dev);
return 0;
}
/* Netdevice change MTU request */
static int sl_change_mtu(struct net_device *dev, int new_mtu)
{
struct slip *sl = netdev_priv(dev);
return sl_realloc_bufs(sl, new_mtu);
}
/* Netdevice get statistics request */
static void
sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct net_device_stats *devstats = &dev->stats;
#ifdef SL_INCLUDE_CSLIP
struct slip *sl = netdev_priv(dev);
struct slcompress *comp = sl->slcomp;
#endif
stats->rx_packets = devstats->rx_packets;
stats->tx_packets = devstats->tx_packets;
stats->rx_bytes = devstats->rx_bytes;
stats->tx_bytes = devstats->tx_bytes;
stats->rx_dropped = devstats->rx_dropped;
stats->tx_dropped = devstats->tx_dropped;
stats->tx_errors = devstats->tx_errors;
stats->rx_errors = devstats->rx_errors;
stats->rx_over_errors = devstats->rx_over_errors;
#ifdef SL_INCLUDE_CSLIP
if (comp) {
/* Generic compressed statistics */
stats->rx_compressed = comp->sls_i_compressed;
stats->tx_compressed = comp->sls_o_compressed;
		/* Do we really still need this? */
stats->rx_fifo_errors += comp->sls_i_compressed;
stats->rx_dropped += comp->sls_i_tossed;
stats->tx_fifo_errors += comp->sls_o_compressed;
stats->collisions += comp->sls_o_misses;
}
#endif
}
/* Netdevice register callback */
static int sl_init(struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
/*
* Finish setting up the DEVICE info.
*/
dev->mtu = sl->mtu;
dev->type = ARPHRD_SLIP + sl->mode;
#ifdef SL_CHECK_TRANSMIT
dev->watchdog_timeo = 20*HZ;
#endif
return 0;
}
static void sl_uninit(struct net_device *dev)
{
struct slip *sl = netdev_priv(dev);
sl_free_bufs(sl);
}
/* Hook the destructor so we can free slip devices at the right point in time */
static void sl_free_netdev(struct net_device *dev)
{
int i = dev->base_addr;
slip_devs[i] = NULL;
}
static const struct net_device_ops sl_netdev_ops = {
.ndo_init = sl_init,
.ndo_uninit = sl_uninit,
.ndo_open = sl_open,
.ndo_stop = sl_close,
.ndo_start_xmit = sl_xmit,
.ndo_get_stats64 = sl_get_stats64,
.ndo_change_mtu = sl_change_mtu,
.ndo_tx_timeout = sl_tx_timeout,
#ifdef CONFIG_SLIP_SMART
.ndo_siocdevprivate = sl_siocdevprivate,
#endif
};
static void sl_setup(struct net_device *dev)
{
dev->netdev_ops = &sl_netdev_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = sl_free_netdev;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
/* MTU range: 68 - 65534 */
dev->min_mtu = 68;
dev->max_mtu = 65534;
/* New-style flags. */
dev->flags = IFF_NOARP|IFF_POINTOPOINT|IFF_MULTICAST;
}
/******************************************
Routines looking at TTY side.
******************************************/
/*
* Handle the 'receiver data ready' interrupt.
* This function is called by the 'tty_io' module in the kernel when
* a block of SLIP data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing. This will not
* be re-entered while running but other ldisc functions may be called
* in parallel
*/
static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
size_t count)
{
struct slip *sl = tty->disc_data;
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
cp++;
continue;
}
#ifdef CONFIG_SLIP_MODE_SLIP6
if (sl->mode & SL_MODE_SLIP6)
slip_unesc6(sl, *cp++);
else
#endif
slip_unesc(sl, *cp++);
}
}
/************************************
* slip_open helper routines.
************************************/
/* Collect hung-up channels */
static void sl_sync(void)
{
int i;
struct net_device *dev;
struct slip *sl;
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
if (dev == NULL)
break;
sl = netdev_priv(dev);
if (sl->tty || sl->leased)
continue;
if (dev->flags & IFF_UP)
dev_close(dev);
}
}
/* Find a free SLIP channel, and link in this `tty' line. */
static struct slip *sl_alloc(void)
{
int i;
char name[IFNAMSIZ];
struct net_device *dev = NULL;
struct slip *sl;
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
if (dev == NULL)
break;
}
/* Sorry, too many, all slots in use */
if (i >= slip_maxdev)
return NULL;
sprintf(name, "sl%d", i);
dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
if (!dev)
return NULL;
dev->base_addr = i;
sl = netdev_priv(dev);
/* Initialize channel control data */
sl->magic = SLIP_MAGIC;
sl->dev = dev;
spin_lock_init(&sl->lock);
INIT_WORK(&sl->tx_work, slip_transmit);
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
slip_devs[i] = dev;
return sl;
}
/*
* Open the high-level part of the SLIP channel.
* This function is called by the TTY module when the
* SLIP line discipline is called for. Because we are
* sure the tty line exists, we only have to link it to
* a free SLIP channel...
*
* Called in process context serialized from other ldisc calls.
*/
static int slip_open(struct tty_struct *tty)
{
struct slip *sl;
int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
/* RTnetlink lock is misused here to serialize concurrent
opens of slip channels. There are better ways, but it is
the simplest one.
*/
rtnl_lock();
	/* Collect hung-up channels. */
sl_sync();
sl = tty->disc_data;
err = -EEXIST;
/* First make sure we're not already connected. */
if (sl && sl->magic == SLIP_MAGIC)
goto err_exit;
/* OK. Find a free SLIP channel to use. */
err = -ENFILE;
sl = sl_alloc();
if (sl == NULL)
goto err_exit;
sl->tty = tty;
tty->disc_data = sl;
sl->pid = current->pid;
if (!test_bit(SLF_INUSE, &sl->flags)) {
/* Perform the low-level SLIP initialization. */
err = sl_alloc_bufs(sl, SL_MTU);
if (err)
goto err_free_chan;
set_bit(SLF_INUSE, &sl->flags);
err = register_netdevice(sl->dev);
if (err)
goto err_free_bufs;
}
#ifdef CONFIG_SLIP_SMART
if (sl->keepalive) {
sl->keepalive_timer.expires = jiffies + sl->keepalive * HZ;
add_timer(&sl->keepalive_timer);
}
if (sl->outfill) {
sl->outfill_timer.expires = jiffies + sl->outfill * HZ;
add_timer(&sl->outfill_timer);
}
#endif
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
/* TTY layer expects 0 on success */
return 0;
err_free_bufs:
sl_free_bufs(sl);
err_free_chan:
sl->tty = NULL;
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
sl_free_netdev(sl->dev);
/* do not call free_netdev before rtnl_unlock */
rtnl_unlock();
free_netdev(sl->dev);
return err;
err_exit:
rtnl_unlock();
/* Count references from TTY module */
return err;
}
/*
* Close down a SLIP channel.
* This means flushing out any pending queues, and then returning. This
* call is serialized against other ldisc functions.
*
 * We also use this method for a hangup event.
*/
static void slip_close(struct tty_struct *tty)
{
struct slip *sl = tty->disc_data;
/* First make sure we're connected. */
if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
return;
spin_lock_bh(&sl->lock);
rcu_assign_pointer(tty->disc_data, NULL);
sl->tty = NULL;
spin_unlock_bh(&sl->lock);
synchronize_rcu();
flush_work(&sl->tx_work);
/* VSV = very important to remove timers */
#ifdef CONFIG_SLIP_SMART
del_timer_sync(&sl->keepalive_timer);
del_timer_sync(&sl->outfill_timer);
#endif
/* Flush network side */
unregister_netdev(sl->dev);
/* This will complete via sl_free_netdev */
}
static void slip_hangup(struct tty_struct *tty)
{
slip_close(tty);
}
/************************************************************************
* STANDARD SLIP ENCAPSULATION *
************************************************************************/
static int slip_esc(unsigned char *s, unsigned char *d, int len)
{
unsigned char *ptr = d;
unsigned char c;
/*
* Send an initial END character to flush out any
* data that may have accumulated in the receiver
* due to line noise.
*/
*ptr++ = END;
/*
* For each byte in the packet, send the appropriate
* character sequence, according to the SLIP protocol.
*/
while (len-- > 0) {
switch (c = *s++) {
case END:
*ptr++ = ESC;
*ptr++ = ESC_END;
break;
case ESC:
*ptr++ = ESC;
*ptr++ = ESC_ESC;
break;
default:
*ptr++ = c;
break;
}
}
*ptr++ = END;
return ptr - d;
}
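/*
 * Worked example of the byte stuffing above (illustrative only, not part of
 * the original driver), assuming the usual RFC 1055 values END = 0xC0,
 * ESC = 0xDB, ESC_END = 0xDC and ESC_ESC = 0xDD: the payload
 * {0x01, 0xC0, 0xDB, 0x02} is sent on the wire as
 * {END, 0x01, ESC, ESC_END, ESC, ESC_ESC, 0x02, END}, i.e.
 * {0xC0, 0x01, 0xDB, 0xDC, 0xDB, 0xDD, 0x02, 0xC0}.
 */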
static void slip_unesc(struct slip *sl, unsigned char s)
{
switch (s) {
case END:
#ifdef CONFIG_SLIP_SMART
/* drop keeptest bit = VSV */
if (test_bit(SLF_KEEPTEST, &sl->flags))
clear_bit(SLF_KEEPTEST, &sl->flags);
#endif
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
(sl->rcount > 2))
sl_bump(sl);
clear_bit(SLF_ESCAPE, &sl->flags);
sl->rcount = 0;
return;
case ESC:
set_bit(SLF_ESCAPE, &sl->flags);
return;
case ESC_ESC:
if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
s = ESC;
break;
case ESC_END:
if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
s = END;
break;
}
if (!test_bit(SLF_ERROR, &sl->flags)) {
if (sl->rcount < sl->buffsize) {
sl->rbuff[sl->rcount++] = s;
return;
}
sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
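/*
 * Decoding mirrors the example above (illustrative comment only): for the
 * incoming byte stream {END, 0x01, ESC, ESC_END, ESC, ESC_ESC, 0x02, END},
 * slip_unesc() stores {0x01, 0xC0, 0xDB, 0x02} in rbuff, and the trailing
 * END (with rcount > 2 and no error flagged) hands the frame to sl_bump().
 */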
#ifdef CONFIG_SLIP_MODE_SLIP6
/************************************************************************
* 6 BIT SLIP ENCAPSULATION *
************************************************************************/
static int slip_esc6(unsigned char *s, unsigned char *d, int len)
{
unsigned char *ptr = d;
unsigned char c;
int i;
unsigned short v = 0;
short bits = 0;
/*
* Send an initial END character to flush out any
* data that may have accumulated in the receiver
* due to line noise.
*/
*ptr++ = 0x70;
/*
* Encode the packet into printable ascii characters
*/
for (i = 0; i < len; ++i) {
v = (v << 8) | s[i];
bits += 8;
while (bits >= 6) {
bits -= 6;
c = 0x30 + ((v >> bits) & 0x3F);
*ptr++ = c;
}
}
if (bits) {
c = 0x30 + ((v << (6 - bits)) & 0x3F);
*ptr++ = c;
}
*ptr++ = 0x70;
return ptr - d;
}
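/*
 * Illustrative example of the 6-bit encoding above, assuming the usual
 * SLIP6 framing byte 0x70: a single payload byte 0xAB (1010 1011b) is
 * emitted as 0x70, then 0x30 + 0x2A = 0x5A ('Z') for the top six bits,
 * then 0x30 + 0x30 = 0x60 ('`') for the remaining two bits padded with
 * zeros, and a closing 0x70.
 */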
static void slip_unesc6(struct slip *sl, unsigned char s)
{
unsigned char c;
if (s == 0x70) {
#ifdef CONFIG_SLIP_SMART
/* drop keeptest bit = VSV */
if (test_bit(SLF_KEEPTEST, &sl->flags))
clear_bit(SLF_KEEPTEST, &sl->flags);
#endif
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
(sl->rcount > 2))
sl_bump(sl);
sl->rcount = 0;
sl->xbits = 0;
sl->xdata = 0;
} else if (s >= 0x30 && s < 0x70) {
sl->xdata = (sl->xdata << 6) | ((s - 0x30) & 0x3F);
sl->xbits += 6;
if (sl->xbits >= 8) {
sl->xbits -= 8;
c = (unsigned char)(sl->xdata >> sl->xbits);
if (!test_bit(SLF_ERROR, &sl->flags)) {
if (sl->rcount < sl->buffsize) {
sl->rbuff[sl->rcount++] = c;
return;
}
sl->dev->stats.rx_over_errors++;
set_bit(SLF_ERROR, &sl->flags);
}
}
}
}
#endif /* CONFIG_SLIP_MODE_SLIP6 */
/* Perform I/O control on an active SLIP channel. */
static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct slip *sl = tty->disc_data;
unsigned int tmp;
int __user *p = (int __user *)arg;
/* First make sure we're connected. */
if (!sl || sl->magic != SLIP_MAGIC)
return -EINVAL;
switch (cmd) {
case SIOCGIFNAME:
tmp = strlen(sl->dev->name) + 1;
if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
return -EFAULT;
return 0;
case SIOCGIFENCAP:
if (put_user(sl->mode, p))
return -EFAULT;
return 0;
case SIOCSIFENCAP:
if (get_user(tmp, p))
return -EFAULT;
#ifndef SL_INCLUDE_CSLIP
if (tmp & (SL_MODE_CSLIP|SL_MODE_ADAPTIVE))
return -EINVAL;
#else
if ((tmp & (SL_MODE_ADAPTIVE | SL_MODE_CSLIP)) ==
(SL_MODE_ADAPTIVE | SL_MODE_CSLIP))
/* return -EINVAL; */
tmp &= ~SL_MODE_ADAPTIVE;
#endif
#ifndef CONFIG_SLIP_MODE_SLIP6
if (tmp & SL_MODE_SLIP6)
return -EINVAL;
#endif
sl->mode = tmp;
sl->dev->type = ARPHRD_SLIP + sl->mode;
return 0;
case SIOCSIFHWADDR:
return -EINVAL;
#ifdef CONFIG_SLIP_SMART
/* VSV changes start here */
case SIOCSKEEPALIVE:
if (get_user(tmp, p))
return -EFAULT;
if (tmp > 255) /* max for unchar */
return -EINVAL;
spin_lock_bh(&sl->lock);
if (!sl->tty) {
spin_unlock_bh(&sl->lock);
return -ENODEV;
}
sl->keepalive = (u8)tmp;
if (sl->keepalive != 0) {
mod_timer(&sl->keepalive_timer,
jiffies + sl->keepalive * HZ);
set_bit(SLF_KEEPTEST, &sl->flags);
} else
del_timer(&sl->keepalive_timer);
spin_unlock_bh(&sl->lock);
return 0;
case SIOCGKEEPALIVE:
if (put_user(sl->keepalive, p))
return -EFAULT;
return 0;
case SIOCSOUTFILL:
if (get_user(tmp, p))
return -EFAULT;
if (tmp > 255) /* max for unchar */
return -EINVAL;
spin_lock_bh(&sl->lock);
if (!sl->tty) {
spin_unlock_bh(&sl->lock);
return -ENODEV;
}
sl->outfill = (u8)tmp;
if (sl->outfill != 0) {
mod_timer(&sl->outfill_timer,
jiffies + sl->outfill * HZ);
set_bit(SLF_OUTWAIT, &sl->flags);
} else
del_timer(&sl->outfill_timer);
spin_unlock_bh(&sl->lock);
return 0;
case SIOCGOUTFILL:
if (put_user(sl->outfill, p))
return -EFAULT;
return 0;
/* VSV changes end */
#endif
default:
return tty_mode_ioctl(tty, cmd, arg);
}
}
/* VSV changes start here */
#ifdef CONFIG_SLIP_SMART
/* sl_siocdevprivate() is called from net/core/dev.c to let ifconfig
 * get/set the outfill and keepalive parameters.
 */
static int sl_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd)
{
struct slip *sl = netdev_priv(dev);
unsigned long *p = (unsigned long *)&rq->ifr_ifru;
if (sl == NULL) /* Allocation failed ?? */
return -ENODEV;
if (in_compat_syscall())
return -EOPNOTSUPP;
spin_lock_bh(&sl->lock);
if (!sl->tty) {
spin_unlock_bh(&sl->lock);
return -ENODEV;
}
switch (cmd) {
case SIOCSKEEPALIVE:
/* max for unchar */
if ((unsigned)*p > 255) {
spin_unlock_bh(&sl->lock);
return -EINVAL;
}
sl->keepalive = (u8)*p;
if (sl->keepalive != 0) {
sl->keepalive_timer.expires =
jiffies + sl->keepalive * HZ;
mod_timer(&sl->keepalive_timer,
jiffies + sl->keepalive * HZ);
set_bit(SLF_KEEPTEST, &sl->flags);
} else
del_timer(&sl->keepalive_timer);
break;
case SIOCGKEEPALIVE:
*p = sl->keepalive;
break;
case SIOCSOUTFILL:
if ((unsigned)*p > 255) { /* max for unchar */
spin_unlock_bh(&sl->lock);
return -EINVAL;
}
sl->outfill = (u8)*p;
if (sl->outfill != 0) {
mod_timer(&sl->outfill_timer,
jiffies + sl->outfill * HZ);
set_bit(SLF_OUTWAIT, &sl->flags);
} else
del_timer(&sl->outfill_timer);
break;
case SIOCGOUTFILL:
*p = sl->outfill;
break;
case SIOCSLEASE:
		/* Resolve a race condition: the device may have been hung up
		 * and then reopened by another process while we ioctl it.
		 */
if (sl->tty != current->signal->tty &&
sl->pid != current->pid) {
spin_unlock_bh(&sl->lock);
return -EPERM;
}
sl->leased = 0;
if (*p)
sl->leased = 1;
break;
case SIOCGLEASE:
*p = sl->leased;
}
spin_unlock_bh(&sl->lock);
return 0;
}
#endif
/* VSV changes end */
static struct tty_ldisc_ops sl_ldisc = {
.owner = THIS_MODULE,
.num = N_SLIP,
.name = "slip",
.open = slip_open,
.close = slip_close,
.hangup = slip_hangup,
.ioctl = slip_ioctl,
.receive_buf = slip_receive_buf,
.write_wakeup = slip_write_wakeup,
};
static int __init slip_init(void)
{
int status;
if (slip_maxdev < 4)
slip_maxdev = 4; /* Sanity */
printk(KERN_INFO "SLIP: version %s (dynamic channels, max=%d)"
#ifdef CONFIG_SLIP_MODE_SLIP6
" (6 bit encapsulation enabled)"
#endif
".\n",
SLIP_VERSION, slip_maxdev);
#if defined(SL_INCLUDE_CSLIP)
printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California.\n");
#endif
#ifdef CONFIG_SLIP_SMART
printk(KERN_INFO "SLIP linefill/keepalive option.\n");
#endif
slip_devs = kcalloc(slip_maxdev, sizeof(struct net_device *),
GFP_KERNEL);
if (!slip_devs)
return -ENOMEM;
/* Fill in our line protocol discipline, and register it */
status = tty_register_ldisc(&sl_ldisc);
if (status != 0) {
printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
kfree(slip_devs);
}
return status;
}
static void __exit slip_exit(void)
{
int i;
struct net_device *dev;
struct slip *sl;
unsigned long timeout = jiffies + HZ;
int busy = 0;
if (slip_devs == NULL)
return;
/* First of all: check for active disciplines and hangup them.
*/
do {
if (busy)
msleep_interruptible(100);
busy = 0;
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
if (!dev)
continue;
sl = netdev_priv(dev);
spin_lock_bh(&sl->lock);
if (sl->tty) {
busy++;
tty_hangup(sl->tty);
}
spin_unlock_bh(&sl->lock);
}
} while (busy && time_before(jiffies, timeout));
/* FIXME: hangup is async so we should wait when doing this second
phase */
for (i = 0; i < slip_maxdev; i++) {
dev = slip_devs[i];
if (!dev)
continue;
slip_devs[i] = NULL;
sl = netdev_priv(dev);
if (sl->tty) {
printk(KERN_ERR "%s: tty discipline still running\n",
dev->name);
}
unregister_netdev(dev);
}
kfree(slip_devs);
slip_devs = NULL;
tty_unregister_ldisc(&sl_ldisc);
}
module_init(slip_init);
module_exit(slip_exit);
#ifdef CONFIG_SLIP_SMART
/*
* This is start of the code for multislip style line checking
* added by Stanislav Voronyi. All changes before marked VSV
*/
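/*
 * Summary of the two timers below (descriptive comment only):
 *  - sl_outfill(): if no frame was transmitted during the last sl->outfill
 *    seconds (SLF_OUTWAIT is still set), a single END byte (0x70 in SLIP6
 *    mode) is written to the tty so the remote end still sees traffic on an
 *    idle line.
 *  - sl_keepalive(): if no frame was received during the last sl->keepalive
 *    seconds (SLF_KEEPTEST is still set), the tty is hung up, which tears
 *    down the SLIP channel.
 */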
static void sl_outfill(struct timer_list *t)
{
struct slip *sl = from_timer(sl, t, outfill_timer);
spin_lock(&sl->lock);
if (sl->tty == NULL)
goto out;
if (sl->outfill) {
if (test_bit(SLF_OUTWAIT, &sl->flags)) {
/* no packets were transmitted, do outfill */
#ifdef CONFIG_SLIP_MODE_SLIP6
unsigned char s = (sl->mode & SL_MODE_SLIP6)?0x70:END;
#else
unsigned char s = END;
#endif
			/* Put an END byte into the tty queue. Is this right? */
if (!netif_queue_stopped(sl->dev)) {
/* if device busy no outfill */
sl->tty->ops->write(sl->tty, &s, 1);
}
} else
set_bit(SLF_OUTWAIT, &sl->flags);
mod_timer(&sl->outfill_timer, jiffies+sl->outfill*HZ);
}
out:
spin_unlock(&sl->lock);
}
static void sl_keepalive(struct timer_list *t)
{
struct slip *sl = from_timer(sl, t, keepalive_timer);
spin_lock(&sl->lock);
if (sl->tty == NULL)
goto out;
if (sl->keepalive) {
if (test_bit(SLF_KEEPTEST, &sl->flags)) {
/* keepalive still high :(, we must hangup */
if (sl->outfill)
/* outfill timer must be deleted too */
(void)del_timer(&sl->outfill_timer);
printk(KERN_DEBUG "%s: no packets received during keepalive timeout, hangup.\n", sl->dev->name);
/* this must hangup tty & close slip */
tty_hangup(sl->tty);
			/* I don't think anything else is needed here. */
goto out;
} else
set_bit(SLF_KEEPTEST, &sl->flags);
mod_timer(&sl->keepalive_timer, jiffies+sl->keepalive*HZ);
}
out:
spin_unlock(&sl->lock);
}
#endif
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_SLIP);
| linux-master | drivers/net/slip/slip.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for the regulator based Ethernet Power Sourcing Equipment, without
// auto classification support.
//
// Copyright (c) 2022 Pengutronix, Oleksij Rempel <[email protected]>
//
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pse-pd/pse.h>
#include <linux/regulator/consumer.h>
struct pse_reg_priv {
struct pse_controller_dev pcdev;
	struct regulator *ps; /* power source */
enum ethtool_podl_pse_admin_state admin_state;
};
static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
{
return container_of(pcdev, struct pse_reg_priv, pcdev);
}
static int
pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
if (priv->admin_state == config->admin_cotrol)
return 0;
switch (config->admin_cotrol) {
case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
ret = regulator_enable(priv->ps);
break;
case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
ret = regulator_disable(priv->ps);
break;
default:
dev_err(pcdev->dev, "Unknown admin state %i\n",
config->admin_cotrol);
ret = -ENOTSUPP;
}
if (ret)
return ret;
priv->admin_state = config->admin_cotrol;
return 0;
}
static int
pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
ret = regulator_is_enabled(priv->ps);
if (ret < 0)
return ret;
if (!ret)
status->podl_pw_status = ETHTOOL_PODL_PSE_PW_D_STATUS_DISABLED;
else
status->podl_pw_status =
ETHTOOL_PODL_PSE_PW_D_STATUS_DELIVERING;
status->podl_admin_state = priv->admin_state;
return 0;
}
static const struct pse_controller_ops pse_reg_ops = {
.ethtool_get_status = pse_reg_ethtool_get_status,
.ethtool_set_config = pse_reg_ethtool_set_config,
};
static int
pse_reg_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pse_reg_priv *priv;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (!pdev->dev.of_node)
return -ENOENT;
priv->ps = devm_regulator_get_exclusive(dev, "pse");
if (IS_ERR(priv->ps))
return dev_err_probe(dev, PTR_ERR(priv->ps),
"failed to get PSE regulator.\n");
platform_set_drvdata(pdev, priv);
ret = regulator_is_enabled(priv->ps);
if (ret < 0)
return ret;
if (ret)
priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
else
priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
priv->pcdev.owner = THIS_MODULE;
priv->pcdev.ops = &pse_reg_ops;
priv->pcdev.dev = dev;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret) {
dev_err(dev, "failed to register PSE controller (%pe)\n",
ERR_PTR(ret));
return ret;
}
return 0;
}
static const __maybe_unused struct of_device_id pse_reg_of_match[] = {
{ .compatible = "podl-pse-regulator", },
{ },
};
MODULE_DEVICE_TABLE(of, pse_reg_of_match);
static struct platform_driver pse_reg_driver = {
.probe = pse_reg_probe,
.driver = {
.name = "PSE regulator",
.of_match_table = of_match_ptr(pse_reg_of_match),
},
};
module_platform_driver(pse_reg_driver);
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("regulator based Ethernet Power Sourcing Equipment");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:pse-regulator");
| linux-master | drivers/net/pse-pd/pse_regulator.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// Framework for Ethernet Power Sourcing Equipment
//
// Copyright (c) 2022 Pengutronix, Oleksij Rempel <[email protected]>
//
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
static DEFINE_MUTEX(pse_list_mutex);
static LIST_HEAD(pse_controller_list);
/**
* struct pse_control - a PSE control
* @pcdev: a pointer to the PSE controller device
* this PSE control belongs to
* @list: list entry for the pcdev's PSE controller list
* @id: ID of the PSE line in the PSE controller device
* @refcnt: Number of gets of this pse_control
*/
struct pse_control {
struct pse_controller_dev *pcdev;
struct list_head list;
unsigned int id;
struct kref refcnt;
};
/**
 * of_pse_zero_xlate - dummy function for controllers with only one control
* @pcdev: a pointer to the PSE controller device
* @pse_spec: PSE line specifier as found in the device tree
*
* This static translation function is used by default if of_xlate in
* :c:type:`pse_controller_dev` is not set. It is useful for all PSE
* controllers with #pse-cells = <0>.
*/
static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
const struct of_phandle_args *pse_spec)
{
return 0;
}
/**
* of_pse_simple_xlate - translate pse_spec to the PSE line number
* @pcdev: a pointer to the PSE controller device
* @pse_spec: PSE line specifier as found in the device tree
*
* This static translation function is used by default if of_xlate in
* :c:type:`pse_controller_dev` is not set. It is useful for all PSE
* controllers with 1:1 mapping, where PSE lines can be indexed by number
* without gaps.
*/
static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
const struct of_phandle_args *pse_spec)
{
if (pse_spec->args[0] >= pcdev->nr_lines)
return -EINVAL;
return pse_spec->args[0];
}
/**
* pse_controller_register - register a PSE controller device
* @pcdev: a pointer to the initialized PSE controller device
*/
int pse_controller_register(struct pse_controller_dev *pcdev)
{
if (!pcdev->of_xlate) {
if (pcdev->of_pse_n_cells == 0)
pcdev->of_xlate = of_pse_zero_xlate;
else if (pcdev->of_pse_n_cells == 1)
pcdev->of_xlate = of_pse_simple_xlate;
}
mutex_init(&pcdev->lock);
INIT_LIST_HEAD(&pcdev->pse_control_head);
mutex_lock(&pse_list_mutex);
list_add(&pcdev->list, &pse_controller_list);
mutex_unlock(&pse_list_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pse_controller_register);
/**
* pse_controller_unregister - unregister a PSE controller device
* @pcdev: a pointer to the PSE controller device
*/
void pse_controller_unregister(struct pse_controller_dev *pcdev)
{
mutex_lock(&pse_list_mutex);
list_del(&pcdev->list);
mutex_unlock(&pse_list_mutex);
}
EXPORT_SYMBOL_GPL(pse_controller_unregister);
static void devm_pse_controller_release(struct device *dev, void *res)
{
pse_controller_unregister(*(struct pse_controller_dev **)res);
}
/**
* devm_pse_controller_register - resource managed pse_controller_register()
* @dev: device that is registering this PSE controller
* @pcdev: a pointer to the initialized PSE controller device
*
* Managed pse_controller_register(). For PSE controllers registered by
* this function, pse_controller_unregister() is automatically called on
* driver detach. See pse_controller_register() for more information.
*/
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev)
{
struct pse_controller_dev **pcdevp;
int ret;
pcdevp = devres_alloc(devm_pse_controller_release, sizeof(*pcdevp),
GFP_KERNEL);
if (!pcdevp)
return -ENOMEM;
ret = pse_controller_register(pcdev);
if (ret) {
devres_free(pcdevp);
return ret;
}
*pcdevp = pcdev;
devres_add(dev, pcdevp);
return 0;
}
EXPORT_SYMBOL_GPL(devm_pse_controller_register);
/* PSE control section */
static void __pse_control_release(struct kref *kref)
{
struct pse_control *psec = container_of(kref, struct pse_control,
refcnt);
lockdep_assert_held(&pse_list_mutex);
module_put(psec->pcdev->owner);
list_del(&psec->list);
kfree(psec);
}
static void __pse_control_put_internal(struct pse_control *psec)
{
lockdep_assert_held(&pse_list_mutex);
kref_put(&psec->refcnt, __pse_control_release);
}
/**
* pse_control_put - free the PSE control
* @psec: PSE control pointer
*/
void pse_control_put(struct pse_control *psec)
{
if (IS_ERR_OR_NULL(psec))
return;
mutex_lock(&pse_list_mutex);
__pse_control_put_internal(psec);
mutex_unlock(&pse_list_mutex);
}
EXPORT_SYMBOL_GPL(pse_control_put);
static struct pse_control *
pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
{
struct pse_control *psec;
lockdep_assert_held(&pse_list_mutex);
list_for_each_entry(psec, &pcdev->pse_control_head, list) {
if (psec->id == index) {
kref_get(&psec->refcnt);
return psec;
}
}
psec = kzalloc(sizeof(*psec), GFP_KERNEL);
if (!psec)
return ERR_PTR(-ENOMEM);
if (!try_module_get(pcdev->owner)) {
kfree(psec);
return ERR_PTR(-ENODEV);
}
psec->pcdev = pcdev;
list_add(&psec->list, &pcdev->pse_control_head);
psec->id = index;
kref_init(&psec->refcnt);
return psec;
}
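/**
 * of_pse_control_get - look up and obtain a reference to a PSE control
 * @node: device node that references the PSE controller through a "pses"
 *        phandle (with "#pse-cells" arguments)
 *
 * Returns a pointer to the PSE control on success, or an ERR_PTR() on
 * failure; -EPROBE_DEFER is returned if the referenced controller has not
 * been registered yet.
 */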
struct pse_control *
of_pse_control_get(struct device_node *node)
{
struct pse_controller_dev *r, *pcdev;
struct of_phandle_args args;
struct pse_control *psec;
int psec_id;
int ret;
if (!node)
return ERR_PTR(-EINVAL);
ret = of_parse_phandle_with_args(node, "pses", "#pse-cells", 0, &args);
if (ret)
return ERR_PTR(ret);
mutex_lock(&pse_list_mutex);
pcdev = NULL;
list_for_each_entry(r, &pse_controller_list, list) {
if (args.np == r->dev->of_node) {
pcdev = r;
break;
}
}
if (!pcdev) {
psec = ERR_PTR(-EPROBE_DEFER);
goto out;
}
if (WARN_ON(args.args_count != pcdev->of_pse_n_cells)) {
psec = ERR_PTR(-EINVAL);
goto out;
}
psec_id = pcdev->of_xlate(pcdev, &args);
if (psec_id < 0) {
psec = ERR_PTR(psec_id);
goto out;
}
/* pse_list_mutex also protects the pcdev's pse_control list */
psec = pse_control_get_internal(pcdev, psec_id);
out:
mutex_unlock(&pse_list_mutex);
of_node_put(args.np);
return psec;
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @status: struct to store PSE status
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
const struct pse_controller_ops *ops;
int err;
ops = psec->pcdev->ops;
if (!ops->ethtool_get_status) {
NL_SET_ERR_MSG(extack,
"PSE driver does not support status report");
return -EOPNOTSUPP;
}
mutex_lock(&psec->pcdev->lock);
err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
mutex_unlock(&psec->pcdev->lock);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
/**
* pse_ethtool_set_config - set PSE control configuration
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @config: Configuration of the test to run
*/
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
const struct pse_controller_ops *ops;
int err;
ops = psec->pcdev->ops;
if (!ops->ethtool_set_config) {
NL_SET_ERR_MSG(extack,
"PSE driver does not configuration");
return -EOPNOTSUPP;
}
mutex_lock(&psec->pcdev->lock);
err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
mutex_unlock(&psec->pcdev->lock);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
| linux-master | drivers/net/pse-pd/pse_core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
* Copyright (C) 2008-2023, VMware, Inc. All Rights Reserved.
* Maintained by: [email protected]
*
*/
#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
static void
vmxnet3_xdp_exchange_program(struct vmxnet3_adapter *adapter,
struct bpf_prog *prog)
{
rcu_assign_pointer(adapter->xdp_bpf_prog, prog);
}
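/* Pick a tx queue for the current CPU: a 1:1 mapping when the CPU id is
 * below the number of tx queues, otherwise reciprocal_scale() folds the
 * CPU id into the [0, tq_number) range.
 */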
static inline struct vmxnet3_tx_queue *
vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
{
struct vmxnet3_tx_queue *tq;
int tq_number;
int cpu;
tq_number = adapter->num_tx_queues;
cpu = smp_processor_id();
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
return tq;
}
static int
vmxnet3_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf,
struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct bpf_prog *new_bpf_prog = bpf->prog;
struct bpf_prog *old_bpf_prog;
bool need_update;
bool running;
int err;
if (new_bpf_prog && netdev->mtu > VMXNET3_XDP_MAX_MTU) {
NL_SET_ERR_MSG_FMT_MOD(extack, "MTU %u too large for XDP",
netdev->mtu);
return -EOPNOTSUPP;
}
if (adapter->netdev->features & NETIF_F_LRO) {
NL_SET_ERR_MSG_MOD(extack, "LRO is not supported with XDP");
adapter->netdev->features &= ~NETIF_F_LRO;
}
old_bpf_prog = rcu_dereference(adapter->xdp_bpf_prog);
if (!new_bpf_prog && !old_bpf_prog)
return 0;
running = netif_running(netdev);
need_update = !!old_bpf_prog != !!new_bpf_prog;
if (running && need_update)
vmxnet3_quiesce_dev(adapter);
vmxnet3_xdp_exchange_program(adapter, new_bpf_prog);
if (old_bpf_prog)
bpf_prog_put(old_bpf_prog);
if (!running || !need_update)
return 0;
if (new_bpf_prog)
xdp_features_set_redirect_target(netdev, false);
else
xdp_features_clear_redirect_target(netdev);
vmxnet3_reset_dev(adapter);
vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"failed to re-create rx queues for XDP.");
return -EOPNOTSUPP;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"failed to activate device for XDP.");
return -EOPNOTSUPP;
}
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
return 0;
}
/* This is the main XDP entry point used by the kernel to set/unset an eBPF program. */
int
vmxnet3_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
switch (bpf->command) {
case XDP_SETUP_PROG:
return vmxnet3_xdp_set(netdev, bpf, bpf->extack);
default:
return -EINVAL;
}
return 0;
}
static int
vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
struct xdp_frame *xdpf,
struct vmxnet3_tx_queue *tq, bool dma_map)
{
struct vmxnet3_tx_buf_info *tbi = NULL;
union Vmxnet3_GenericDesc *gdesc;
struct vmxnet3_tx_ctx ctx;
int tx_num_deferred;
struct page *page;
u32 buf_size;
u32 dw2;
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
gdesc = ctx.sop_txd;
buf_size = xdpf->len;
tbi = tq->buf_info + tq->tx_ring.next2fill;
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
return -ENOSPC;
}
tbi->map_type = VMXNET3_MAP_XDP;
if (dma_map) { /* ndo_xdp_xmit */
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
return -EFAULT;
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
tbi->dma_addr = page_pool_get_dma_addr(page) +
VMXNET3_XDP_HEADROOM;
dma_sync_single_for_device(&adapter->pdev->dev,
tbi->dma_addr, buf_size,
DMA_TO_DEVICE);
}
tbi->xdpf = xdpf;
tbi->len = buf_size;
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
gdesc->dword[2] = cpu_to_le32(dw2);
/* Setup the EOP desc */
gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
gdesc->txd.hlen = 0;
gdesc->txd.ti = 0;
tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
le32_add_cpu(&tq->shared->txNumDeferred, 1);
tx_num_deferred++;
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
/* set the last buf_info for the pkt */
tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
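	/* Make sure the descriptor fields written above are visible before
	 * the gen bit flip below hands the descriptor over to the device.
	 */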
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
* tq->shared->txNumDeferred to 0.
*/
if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
VMXNET3_REG_TXPROD + tq->qid * 8,
tq->tx_ring.next2fill);
}
return 0;
}
static int
vmxnet3_xdp_xmit_back(struct vmxnet3_adapter *adapter,
struct xdp_frame *xdpf)
{
struct vmxnet3_tx_queue *tq;
struct netdev_queue *nq;
int err;
tq = vmxnet3_xdp_get_tq(adapter);
if (tq->stopped)
return -ENETDOWN;
nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
__netif_tx_lock(nq, smp_processor_id());
err = vmxnet3_xdp_xmit_frame(adapter, xdpf, tq, false);
__netif_tx_unlock(nq);
return err;
}
/* ndo_xdp_xmit */
int
vmxnet3_xdp_xmit(struct net_device *dev,
int n, struct xdp_frame **frames, u32 flags)
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
return -ENETDOWN;
if (unlikely(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)))
return -EINVAL;
tq = vmxnet3_xdp_get_tq(adapter);
if (tq->stopped)
return -ENETDOWN;
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
break;
}
}
tq->stats.xdp_xmit += i;
return i;
}
static int
vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
struct bpf_prog *prog)
{
struct xdp_frame *xdpf;
struct page *page;
int err;
u32 act;
rq->stats.xdp_packets++;
act = bpf_prog_run_xdp(prog, xdp);
page = virt_to_page(xdp->data_hard_start);
switch (act) {
case XDP_PASS:
return act;
case XDP_REDIRECT:
err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
if (!err) {
rq->stats.xdp_redirects++;
} else {
rq->stats.xdp_drops++;
page_pool_recycle_direct(rq->page_pool, page);
}
return act;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf ||
vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
rq->stats.xdp_drops++;
page_pool_recycle_direct(rq->page_pool, page);
} else {
rq->stats.xdp_tx++;
}
return act;
default:
bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rq->adapter->netdev, prog, act);
rq->stats.xdp_aborted++;
break;
case XDP_DROP:
rq->stats.xdp_drops++;
break;
}
page_pool_recycle_direct(rq->page_pool, page);
return act;
}
static struct sk_buff *
vmxnet3_build_skb(struct vmxnet3_rx_queue *rq, struct page *page,
const struct xdp_buff *xdp)
{
struct sk_buff *skb;
skb = build_skb(page_address(page), PAGE_SIZE);
if (unlikely(!skb)) {
page_pool_recycle_direct(rq->page_pool, page);
rq->stats.rx_buf_alloc_failure++;
return NULL;
}
/* bpf prog might change len and data position. */
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
skb_mark_for_recycle(skb);
return skb;
}
/* Handle packets from DataRing. */
int
vmxnet3_process_xdp_small(struct vmxnet3_adapter *adapter,
struct vmxnet3_rx_queue *rq,
void *data, int len,
struct sk_buff **skb_xdp_pass)
{
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
struct page *page;
int act;
page = page_pool_alloc_pages(rq->page_pool, GFP_ATOMIC);
if (unlikely(!page)) {
rq->stats.rx_buf_alloc_failure++;
return XDP_DROP;
}
xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
len, false);
xdp_buff_clear_frags_flag(&xdp);
/* Must copy the data because it's at dataring. */
memcpy(xdp.data, data, len);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
if (!xdp_prog) {
act = XDP_PASS;
goto out_skb;
}
act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
if (act != XDP_PASS)
return act;
out_skb:
*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
if (!*skb_xdp_pass)
return XDP_DROP;
/* No need to refill. */
return likely(*skb_xdp_pass) ? act : XDP_DROP;
}
int
vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
struct vmxnet3_rx_queue *rq,
struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi,
struct Vmxnet3_RxDesc *rxd,
struct sk_buff **skb_xdp_pass)
{
struct bpf_prog *xdp_prog;
dma_addr_t new_dma_addr;
struct xdp_buff xdp;
struct page *page;
void *new_data;
int act;
page = rbi->page;
dma_sync_single_for_cpu(&adapter->pdev->dev,
page_pool_get_dma_addr(page) +
rq->page_pool->p.offset, rcd->len,
page_pool_get_dma_dir(rq->page_pool));
xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
rcd->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
if (!xdp_prog) {
act = XDP_PASS;
goto out_skb;
}
act = vmxnet3_run_xdp(rq, &xdp, xdp_prog);
if (act == XDP_PASS) {
out_skb:
*skb_xdp_pass = vmxnet3_build_skb(rq, page, &xdp);
if (!*skb_xdp_pass)
act = XDP_DROP;
}
new_data = vmxnet3_pp_get_buff(rq->page_pool, &new_dma_addr,
GFP_ATOMIC);
if (!new_data) {
rq->stats.rx_buf_alloc_failure++;
return XDP_DROP;
}
rbi->page = virt_to_page(new_data);
rbi->dma_addr = new_dma_addr;
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
return act;
}
| linux-master | drivers/net/vmxnet3/vmxnet3_xdp.c |
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: [email protected]
*
*/
#include <linux/module.h>
#include <net/ip6_checksum.h>
#include "vmxnet3_int.h"
#include "vmxnet3_xdp.h"
char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
/*
* PCI Device ID Table
* Last entry must be all 0s
*/
static const struct pci_device_id vmxnet3_pciid_table[] = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static int enable_mq = 1;
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
/*
* Enable/Disable the given intr
*/
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}
static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}
/*
* Enable/Disable all intrs used by the device
*/
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_enable_intr(adapter, i);
if (!VMXNET3_VERSION_GE_6(adapter) ||
!adapter->queuesExtEnabled) {
adapter->shared->devRead.intrConf.intrCtrl &=
cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
} else {
adapter->shared->devReadExt.intrConfExt.intrCtrl &=
cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}
}
static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
int i;
if (!VMXNET3_VERSION_GE_6(adapter) ||
!adapter->queuesExtEnabled) {
adapter->shared->devRead.intrConf.intrCtrl |=
cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
} else {
adapter->shared->devReadExt.intrConfExt.intrCtrl |=
cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
}
for (i = 0; i < adapter->intr.num_intrs; i++)
vmxnet3_disable_intr(adapter, i);
}
static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
return tq->stopped;
}
static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}
static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = false;
netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
tq->stopped = true;
tq->num_stop++;
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
/* Check whether the capability is supported by the UPT device, or whether
 * UPT was requested at all.
 */
bool
vmxnet3_check_ptcapability(u32 cap_supported, u32 cap)
{
if (cap_supported & (1UL << VMXNET3_DCR_ERROR) ||
cap_supported & (1UL << cap)) {
return true;
}
return false;
}
/*
* Check the link state. This may start or stop the tx queue.
*/
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
u32 ret;
int i;
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->link_speed = ret >> 16;
if (ret & 1) { /* Link is up. */
netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
adapter->link_speed);
netif_carrier_on(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_start(&adapter->tx_queue[i],
adapter);
}
} else {
netdev_info(adapter->netdev, "NIC Link is Down\n");
netif_carrier_off(adapter->netdev);
if (affectTxQueue) {
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
}
}
}
static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
int i;
unsigned long flags;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
vmxnet3_ack_events(adapter, events);
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
vmxnet3_check_link(adapter, true);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
dev_err(&adapter->netdev->dev,
"%s: tq[%d] error 0x%x\n",
adapter->netdev->name, i, le32_to_cpu(
adapter->tqd_start[i].status.error));
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rqd_start[i].status.stopped)
dev_err(&adapter->netdev->dev,
"%s: rq[%d] error 0x%x\n",
adapter->netdev->name, i,
adapter->rqd_start[i].status.error);
schedule_work(&adapter->work);
}
}
#ifdef __BIG_ENDIAN_BITFIELD
/*
* The device expects the bitfields in shared structures to be written in
* little endian. When CPU is big endian, the following routines are used to
* correctly read and write into ABI.
 * The general technique used here is: double-word bitfields are defined in
 * the opposite order for big-endian architectures. Before reading them, the
 * driver translates the complete double word using le32_to_cpu(). Similarly,
 * after the driver writes into the bitfields, cpu_to_le32() is used to
 * translate the double words back into the required format.
 * To avoid touching bits in a shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following
 * functions.
*/
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
struct Vmxnet3_RxDesc *dstDesc)
{
u32 *src = (u32 *)srcDesc + 2;
u32 *dst = (u32 *)dstDesc + 2;
dstDesc->addr = le64_to_cpu(srcDesc->addr);
*dst = le32_to_cpu(*src);
dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}
static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
struct Vmxnet3_TxDesc *dstDesc)
{
int i;
u32 *src = (u32 *)(srcDesc + 1);
u32 *dst = (u32 *)(dstDesc + 1);
/* Working backwards so that the gen bit is set at the end. */
for (i = 2; i > 0; i--) {
src--;
dst--;
*dst = cpu_to_le32(*src);
}
}
static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
struct Vmxnet3_RxCompDesc *dstDesc)
{
int i = 0;
u32 *src = (u32 *)srcDesc;
u32 *dst = (u32 *)dstDesc;
for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
*dst = le32_to_cpu(*src);
src++;
dst++;
}
}
/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
u32 temp = le32_to_cpu(*bitfield);
u32 mask = ((1 << size) - 1) << pos;
temp &= mask;
temp >>= pos;
return temp;
}
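/*
 * Example (illustrative only): with *bitfield holding cpu_to_le32(0xABCD),
 * get_bitfield32(bitfield, 4, 8) masks with 0xFF0 and returns 0xBC.
 */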
#endif /* __BIG_ENDIAN_BITFIELD */
#ifdef __BIG_ENDIAN_BITFIELD
# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
(dstrcd) = (tmp); \
vmxnet3_RxCompToCPU((rcd), (tmp)); \
} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
(dstrxd) = (tmp); \
vmxnet3_RxDescToCPU((rxd), (tmp)); \
} while (0)
#else
# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
#endif /* __BIG_ENDIAN_BITFIELD */
static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
u32 map_type = tbi->map_type;
if (map_type & VMXNET3_MAP_SINGLE)
dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
DMA_TO_DEVICE);
else if (map_type & VMXNET3_MAP_PAGE)
dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
DMA_TO_DEVICE);
else
BUG_ON(map_type & ~VMXNET3_MAP_XDP);
tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}
static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
struct xdp_frame_bulk *bq)
{
struct vmxnet3_tx_buf_info *tbi;
int entries = 0;
u32 map_type;
/* no out of order completion */
BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
tbi = &tq->buf_info[eop_idx];
BUG_ON(!tbi->skb);
map_type = tbi->map_type;
VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
while (tq->tx_ring.next2comp != eop_idx) {
vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
pdev);
/* update next2comp w/o tx_lock. Since we are marking more,
 * not fewer, tx ring entries as available, the worst case is
 * that the tx routine incorrectly re-queues a pkt due to
 * insufficient tx ring entries.
 */
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
entries++;
}
if (map_type & VMXNET3_MAP_XDP)
xdp_return_frame_bulk(tbi->xdpf, bq);
else
dev_kfree_skb_any(tbi->skb);
/* xdpf and skb are in an anonymous union. */
tbi->skb = NULL;
return entries;
}
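/* Walk the Tx completion ring while the descriptor GEN bit matches the
 * ring's current generation, unmapping and freeing every packet the device
 * has reported as done. A stopped queue is woken again only once enough
 * ring entries have been reclaimed (VMXNET3_WAKE_QUEUE_THRESHOLD) and the
 * carrier is up.
 */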
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
union Vmxnet3_GenericDesc *gdesc;
struct xdp_frame_bulk bq;
int completed = 0;
xdp_frame_bulk_init(&bq);
rcu_read_lock();
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
/* Prevent any &gdesc->tcd field from being (speculatively)
* read before (&gdesc->tcd)->gen is read.
*/
dma_rmb();
completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
&gdesc->tcd), tq, adapter->pdev,
adapter, &bq);
vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
}
xdp_flush_frame_bulk(&bq);
rcu_read_unlock();
if (completed) {
spin_lock(&tq->tx_lock);
if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
netif_carrier_ok(adapter->netdev))) {
vmxnet3_tq_wake(tq, adapter);
}
spin_unlock(&tq->tx_lock);
}
return completed;
}
static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
struct xdp_frame_bulk bq;
u32 map_type;
int i;
xdp_frame_bulk_init(&bq);
rcu_read_lock();
while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
struct vmxnet3_tx_buf_info *tbi;
tbi = tq->buf_info + tq->tx_ring.next2comp;
map_type = tbi->map_type;
vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
if (tbi->skb) {
if (map_type & VMXNET3_MAP_XDP)
xdp_return_frame_bulk(tbi->xdpf, &bq);
else
dev_kfree_skb_any(tbi->skb);
tbi->skb = NULL;
}
vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
}
xdp_flush_frame_bulk(&bq);
rcu_read_unlock();
/* sanity check, verify all buffers are indeed unmapped */
for (i = 0; i < tq->tx_ring.size; i++)
BUG_ON(tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
tq->tx_ring.gen = VMXNET3_INIT_GEN;
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
tq->comp_ring.next2proc = 0;
}
static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
if (tq->tx_ring.base) {
dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc),
tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
tq->data_ring.size * tq->txdata_desc_size,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
kfree(tq->buf_info);
tq->buf_info = NULL;
}
/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}
static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
int i;
/* reset the tx ring contents to 0 and reset the tx ring states */
memset(tq->tx_ring.base, 0, tq->tx_ring.size *
sizeof(struct Vmxnet3_TxDesc));
tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
tq->tx_ring.gen = VMXNET3_INIT_GEN;
memset(tq->data_ring.base, 0,
tq->data_ring.size * tq->txdata_desc_size);
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
tq->comp_ring.next2proc = 0;
tq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset the bookkeeping data */
memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
for (i = 0; i < tq->tx_ring.size; i++)
tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
/* stats are not reset */
}
static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
&tq->tx_ring.basePA, GFP_KERNEL);
if (!tq->tx_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err;
}
tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->data_ring.size * tq->txdata_desc_size,
&tq->data_ring.basePA, GFP_KERNEL);
if (!tq->data_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
goto err;
}
tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA, GFP_KERNEL);
if (!tq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err;
}
tq->buf_info = kcalloc_node(tq->tx_ring.size, sizeof(tq->buf_info[0]),
GFP_KERNEL,
dev_to_node(&adapter->pdev->dev));
if (!tq->buf_info)
goto err;
return 0;
err:
vmxnet3_tq_destroy(tq, adapter);
return -ENOMEM;
}
static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}
/*
* starting from ring->next2fill, allocate rx buffers for the given ring
* of the rx queue and update the rx desc. stop after @num_to_alloc buffers
* are allocated or allocation fails
*/
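/* Three buffer types are handled below: XDP buffers come from the page
 * pool, head (SKB) buffers are freshly allocated skbs mapped for DMA, and
 * body buffers are whole pages attached later as frags. The last requested
 * descriptor is filled but not handed to the device (its GEN bit is left
 * inverted) so that a full ring stays distinguishable from an empty one.
 */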
static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
int num_to_alloc, struct vmxnet3_adapter *adapter)
{
int num_allocated = 0;
struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
rbi = rbi_base + ring->next2fill;
gd = ring->base + ring->next2fill;
rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
if (rbi->buf_type == VMXNET3_RX_BUF_XDP) {
void *data = vmxnet3_pp_get_buff(rq->page_pool,
&rbi->dma_addr,
GFP_KERNEL);
if (!data) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->page = virt_to_page(data);
val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
} else if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
if (rbi->skb == NULL) {
rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
rbi->len,
GFP_KERNEL);
if (unlikely(rbi->skb == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->dma_addr = dma_map_single(
&adapter->pdev->dev,
rbi->skb->data, rbi->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
dev_kfree_skb_any(rbi->skb);
rbi->skb = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
} else {
/* rx buffer skipped by the device */
}
val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
} else {
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
rbi->len != PAGE_SIZE);
if (rbi->page == NULL) {
rbi->page = alloc_page(GFP_ATOMIC);
if (unlikely(rbi->page == NULL)) {
rq->stats.rx_buf_alloc_failure++;
break;
}
rbi->dma_addr = dma_map_page(
&adapter->pdev->dev,
rbi->page, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
rbi->dma_addr)) {
put_page(rbi->page);
rbi->page = NULL;
rq->stats.rx_buf_alloc_failure++;
break;
}
} else {
/* rx buffers skipped by the device */
}
val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
}
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
/* Fill the last buffer but don't mark it ready, or else the
 * device will think that the queue is full */
if (num_allocated == num_to_alloc) {
rbi->comp_state = VMXNET3_RXD_COMP_DONE;
break;
}
gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
netdev_dbg(adapter->netdev,
"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u\n",
num_allocated, ring->next2fill, ring->next2comp);
/* so that the device can distinguish a full ring and an empty ring */
BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);
return num_allocated;
}
static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi)
{
skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len);
skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
}
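/* Map a packet onto Tx descriptors. When headers were copied by
 * vmxnet3_copy_hdr(), the SOP descriptor points at the per-queue data ring
 * slot instead of the skb. The remaining linear data and every frag may each
 * span several descriptors of at most VMXNET3_MAX_TX_BUF_SIZE bytes. The SOP
 * descriptor keeps the previous generation bit so the device does not start
 * processing until vmxnet3_tq_xmit() flips it at the very end.
 */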
static int
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
struct vmxnet3_adapter *adapter)
{
u32 dw2, len;
unsigned long buf_offset;
int i;
union Vmxnet3_GenericDesc *gdesc;
struct vmxnet3_tx_buf_info *tbi = NULL;
BUG_ON(ctx->copy_size > skb_headlen(skb));
/* use the previous gen bit for the SOP desc */
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
gdesc = ctx->sop_txd; /* both loops below can be skipped */
/* no need to map the buffer if headers are copied */
if (ctx->copy_size) {
ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
tq->tx_ring.next2fill *
tq->txdata_desc_size);
ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
ctx->sop_txd->dword[3] = 0;
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_NONE;
netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill,
le64_to_cpu(ctx->sop_txd->txd.addr),
ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
/* use the right gen for non-SOP desc */
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
}
/* linear part can use multiple tx desc if it's big */
len = skb_headlen(skb) - ctx->copy_size;
buf_offset = ctx->copy_size;
while (len) {
u32 buf_size;
if (len < VMXNET3_MAX_TX_BUF_SIZE) {
buf_size = len;
dw2 |= len;
} else {
buf_size = VMXNET3_MAX_TX_BUF_SIZE;
/* spec says that for TxDesc.len, 0 == 2^14 */
}
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data + buf_offset, buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
return -EFAULT;
tbi->len = buf_size;
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
netdev_dbg(adapter->netdev,
"txd[%u]: 0x%Lx 0x%x 0x%x\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
len -= buf_size;
buf_offset += buf_size;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 buf_size;
buf_offset = 0;
len = skb_frag_size(frag);
while (len) {
tbi = tq->buf_info + tq->tx_ring.next2fill;
if (len < VMXNET3_MAX_TX_BUF_SIZE) {
buf_size = len;
dw2 |= len;
} else {
buf_size = VMXNET3_MAX_TX_BUF_SIZE;
/* spec says that for TxDesc.len, 0 == 2^14 */
}
tbi->map_type = VMXNET3_MAP_PAGE;
tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
buf_offset, buf_size,
DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
return -EFAULT;
tbi->len = buf_size;
gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
gdesc->dword[2] = cpu_to_le32(dw2);
gdesc->dword[3] = 0;
netdev_dbg(adapter->netdev,
"txd[%u]: 0x%llx %u %u\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
len -= buf_size;
buf_offset += buf_size;
}
}
ctx->eop_txd = gdesc;
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
return 0;
}
/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}
/*
* parse relevant protocol headers:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
*
* Returns:
* -1: error happens during parsing
* 0: protocol headers parsed, but too big to be copied
* 1: protocol headers parsed and copied
*
* Other effects:
* 1. related *ctx fields are updated.
* 2. ctx->copy_size is # of bytes copied
* 3. the portion to be copied is guaranteed to be in the linear part
*
*/
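/* For checksum offload, copy_size below ends up as the end of the L4 header
 * capped at skb->len, and packets no longer than VMXNET3_HDR_COPY_SIZE are
 * copied in full. In every path, a copy_size larger than one tx data ring
 * descriptor (txdata_desc_size) is rejected: the function returns 0 with
 * copy_size reset to 0 and the packet is sent without the header copy.
 */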
static int
vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_adapter *adapter)
{
u8 protocol = 0;
if (ctx->mss) { /* TSO */
if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
ctx->l4_offset = skb_inner_transport_offset(skb);
ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
} else {
ctx->l4_offset = skb_transport_offset(skb);
ctx->l4_hdr_size = tcp_hdrlen(skb);
ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* For encap packets, skb_checksum_start_offset refers
* to inner L4 offset. Thus, below works for encap as
* well as non-encap case
*/
ctx->l4_offset = skb_checksum_start_offset(skb);
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation) {
struct iphdr *iph = inner_ip_hdr(skb);
if (iph->version == 4) {
protocol = iph->protocol;
} else {
const struct ipv6hdr *ipv6h;
ipv6h = inner_ipv6_hdr(skb);
protocol = ipv6h->nexthdr;
}
} else {
if (ctx->ipv4) {
const struct iphdr *iph = ip_hdr(skb);
protocol = iph->protocol;
} else if (ctx->ipv6) {
const struct ipv6hdr *ipv6h;
ipv6h = ipv6_hdr(skb);
protocol = ipv6h->nexthdr;
}
}
switch (protocol) {
case IPPROTO_TCP:
ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) :
tcp_hdrlen(skb);
break;
case IPPROTO_UDP:
ctx->l4_hdr_size = sizeof(struct udphdr);
break;
default:
ctx->l4_hdr_size = 0;
break;
}
ctx->copy_size = min(ctx->l4_offset +
ctx->l4_hdr_size, skb->len);
} else {
ctx->l4_offset = 0;
ctx->l4_hdr_size = 0;
/* copy as much as allowed */
ctx->copy_size = min_t(unsigned int,
tq->txdata_desc_size,
skb_headlen(skb));
}
if (skb->len <= VMXNET3_HDR_COPY_SIZE)
ctx->copy_size = skb->len;
/* make sure headers are accessible directly */
if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
goto err;
}
if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
tq->stats.oversized_hdr++;
ctx->copy_size = 0;
return 0;
}
return 1;
err:
return -1;
}
/*
* copy relevant protocol headers to the transmit ring:
* For a tso pkt, relevant headers are L2/3/4 including options
* For a pkt requesting csum offloading, they are L2/3 and may include L4
* if it's a TCP/UDP pkt
*
*
* Note that this requires that vmxnet3_parse_hdr be called first to set the
* appropriate bits in ctx first
*/
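/* The copy target below is simply the data ring slot that corresponds to
 * the current tx_ring.next2fill position; each slot is txdata_desc_size
 * bytes, which is also what vmxnet3_map_pkt() points the SOP descriptor at.
 */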
static void
vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_tx_ctx *ctx,
struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_TxDataDesc *tdd;
tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
tq->tx_ring.next2fill *
tq->txdata_desc_size);
memcpy(tdd->data, skb->data, ctx->copy_size);
netdev_dbg(adapter->netdev,
"copy %u bytes to dataRing[%u]\n",
ctx->copy_size, tq->tx_ring.next2fill);
}
static void
vmxnet3_prepare_inner_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
struct tcphdr *tcph = inner_tcp_hdr(skb);
struct iphdr *iph = inner_ip_hdr(skb);
if (iph->version == 4) {
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else {
struct ipv6hdr *iph = inner_ipv6_hdr(skb);
tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
IPPROTO_TCP, 0);
}
}
static void
vmxnet3_prepare_tso(struct sk_buff *skb,
struct vmxnet3_tx_ctx *ctx)
{
struct tcphdr *tcph = tcp_hdr(skb);
if (ctx->ipv4) {
struct iphdr *iph = ip_hdr(skb);
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
IPPROTO_TCP, 0);
} else if (ctx->ipv6) {
tcp_v6_gso_csum_prep(skb);
}
}
static int txd_estimate(const struct sk_buff *skb)
{
int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
}
return count;
}
/*
* Transmits a pkt thru a given tq
* Returns:
* NETDEV_TX_OK: descriptors are setup successfully
* NETDEV_TX_OK: error occurred, the pkt is dropped
* NETDEV_TX_BUSY: tx ring is full, queue is stopped
*
* Side-effects:
* 1. tx ring may be changed
* 2. tq stats may be updated accordingly
* 3. shared->txNumDeferred may be updated
*/
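/* High-level flow of the transmit path below: estimate the descriptor
 * count, linearize the skb if it exceeds the per-packet descriptor limits,
 * parse and (optionally) copy the protocol headers to the data ring, map
 * the linear data and frags, fill the SOP/EOP descriptors, flip the SOP GEN
 * bit last (after a dma_wmb()) to hand the packet to the device, and write
 * the tx producer register once txNumDeferred crosses txThreshold.
 */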
static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
int ret;
u32 count;
int num_pkts;
int tx_num_deferred;
unsigned long flags;
struct vmxnet3_tx_ctx ctx;
union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
/* Use temporary descriptor to avoid touching bits multiple times */
union Vmxnet3_GenericDesc tempTxDesc;
#endif
count = txd_estimate(skb);
ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));
ctx.ipv6 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IPV6));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
if (skb_header_cloned(skb)) {
if (unlikely(pskb_expand_head(skb, 0, 0,
GFP_ATOMIC) != 0)) {
tq->stats.drop_tso++;
goto drop_pkt;
}
tq->stats.copy_skb_header++;
}
if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
/* tso pkts must not use more than
* VMXNET3_MAX_TSO_TXD_PER_PKT entries
*/
if (skb_linearize(skb) != 0) {
tq->stats.drop_too_many_frags++;
goto drop_pkt;
}
tq->stats.linearized++;
/* recalculate the # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
if (unlikely(count > VMXNET3_MAX_TSO_TXD_PER_PKT)) {
tq->stats.drop_too_many_frags++;
goto drop_pkt;
}
}
if (skb->encapsulation) {
vmxnet3_prepare_inner_tso(skb, &ctx);
} else {
vmxnet3_prepare_tso(skb, &ctx);
}
} else {
if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
/* non-tso pkts must not use more than
* VMXNET3_MAX_TXD_PER_PKT entries
*/
if (skb_linearize(skb) != 0) {
tq->stats.drop_too_many_frags++;
goto drop_pkt;
}
tq->stats.linearized++;
/* recalculate the # of descriptors to use */
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
}
}
ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
if (ret >= 0) {
BUG_ON(ret <= 0 && ctx.copy_size != 0);
/* hdrs parsed, check against other limits */
if (ctx.mss) {
if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
VMXNET3_MAX_TX_BUF_SIZE)) {
tq->stats.drop_oversized_hdr++;
goto drop_pkt;
}
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (unlikely(ctx.l4_offset +
skb->csum_offset >
VMXNET3_MAX_CSUM_OFFSET)) {
tq->stats.drop_oversized_hdr++;
goto drop_pkt;
}
}
}
} else {
tq->stats.drop_hdr_inspect_err++;
goto drop_pkt;
}
spin_lock_irqsave(&tq->tx_lock, flags);
if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
tq->stats.tx_ring_full++;
netdev_dbg(adapter->netdev,
"tx queue stopped on %s, next2comp %u"
" next2fill %u\n", adapter->netdev->name,
tq->tx_ring.next2comp, tq->tx_ring.next2fill);
vmxnet3_tq_stop(tq, adapter);
spin_unlock_irqrestore(&tq->tx_lock, flags);
return NETDEV_TX_BUSY;
}
vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
/* fill tx descs related to addr & len */
if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
goto unlock_drop_pkt;
/* setup the EOP desc */
ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
/* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
gdesc = &tempTxDesc;
gdesc->dword[2] = ctx.sop_txd->dword[2];
gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
gdesc = ctx.sop_txd;
#endif
tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
if (ctx.mss) {
if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
if (VMXNET3_VERSION_GE_7(adapter)) {
gdesc->txd.om = VMXNET3_OM_TSO;
gdesc->txd.ext1 = 1;
} else {
gdesc->txd.om = VMXNET3_OM_ENCAP;
}
gdesc->txd.msscof = ctx.mss;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
gdesc->txd.oco = 1;
} else {
gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
gdesc->txd.om = VMXNET3_OM_TSO;
gdesc->txd.msscof = ctx.mss;
}
num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
} else {
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation) {
gdesc->txd.hlen = ctx.l4_offset +
ctx.l4_hdr_size;
if (VMXNET3_VERSION_GE_7(adapter)) {
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.msscof = ctx.l4_offset +
skb->csum_offset;
gdesc->txd.ext1 = 1;
} else {
gdesc->txd.om = VMXNET3_OM_ENCAP;
gdesc->txd.msscof = 0; /* Reserved */
}
} else {
gdesc->txd.hlen = ctx.l4_offset;
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.msscof = ctx.l4_offset +
skb->csum_offset;
}
} else {
gdesc->txd.om = 0;
gdesc->txd.msscof = 0;
}
num_pkts = 1;
}
le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
tx_num_deferred += num_pkts;
if (skb_vlan_tag_present(skb)) {
gdesc->txd.ti = 1;
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
* all other writes to &gdesc->txd.
*/
dma_wmb();
/* finally flips the GEN bit of the SOP desc. */
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
/* Finished updating in bitfields of Tx Desc, so write them in original
* place.
*/
vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
(struct Vmxnet3_TxDesc *)ctx.sop_txd);
gdesc = ctx.sop_txd;
#endif
netdev_dbg(adapter->netdev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
(u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
spin_unlock_irqrestore(&tq->tx_lock, flags);
if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
tq->shared->txNumDeferred = 0;
VMXNET3_WRITE_BAR0_REG(adapter,
adapter->tx_prod_offset + tq->qid * 8,
tq->tx_ring.next2fill);
}
return NETDEV_TX_OK;
unlock_drop_pkt:
spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
tq->stats.drop_total++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
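/* Set up a page pool for the rx queue and register it as the XDP memory
 * model for rq->xdp_rxq. The DMA direction is DMA_BIDIRECTIONAL only when
 * an XDP program is attached (so XDP_TX/redirected frames can be DMA'd
 * back out); otherwise plain DMA_FROM_DEVICE is sufficient.
 */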
static int
vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
struct vmxnet3_rx_queue *rq, int size)
{
bool xdp_prog = vmxnet3_xdp_enabled(adapter);
const struct page_pool_params pp_params = {
.order = 0,
.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.pool_size = size,
.nid = NUMA_NO_NODE,
.dev = &adapter->pdev->dev,
.offset = VMXNET3_XDP_RX_OFFSET,
.max_len = VMXNET3_XDP_MAX_FRSIZE,
.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
};
struct page_pool *pp;
int err;
pp = page_pool_create(&pp_params);
if (IS_ERR(pp))
return PTR_ERR(pp);
err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
rq->napi.napi_id);
if (err < 0)
goto err_free_pp;
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, pp);
if (err)
goto err_unregister_rxq;
rq->page_pool = pp;
return 0;
err_unregister_rxq:
xdp_rxq_info_unreg(&rq->xdp_rxq);
err_free_pp:
page_pool_destroy(pp);
return err;
}
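/* Allocate one page from the pool and return its kernel virtual address;
 * the DMA address handed back already includes the pool's configured
 * headroom offset (pp->p.offset), so callers can DMA straight into the
 * payload area.
 */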
void *
vmxnet3_pp_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
gfp_t gfp_mask)
{
struct page *page;
page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
if (unlikely(!page))
return NULL;
*dma_addr = page_pool_get_dma_addr(page) + pp->p.offset;
return page_address(page);
}
static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
return vmxnet3_tq_xmit(skb,
&adapter->tx_queue[skb->queue_mapping],
adapter, netdev);
}
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
struct sk_buff *skb,
union Vmxnet3_GenericDesc *gdesc)
{
if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
if (gdesc->rcd.v4 &&
(le32_to_cpu(gdesc->dword[3]) &
VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if ((le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
skb->csum_level = 1;
}
WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
WARN_ON_ONCE(gdesc->rcd.frg &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
(1 << VMXNET3_RCD_TUC_SHIFT))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if ((le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
skb->csum_level = 1;
}
WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
WARN_ON_ONCE(gdesc->rcd.frg &&
!(le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
} else {
if (gdesc->rcd.csum) {
skb->csum = htons(gdesc->rcd.csum);
skb->ip_summed = CHECKSUM_PARTIAL;
} else {
skb_checksum_none_assert(skb);
}
}
} else {
skb_checksum_none_assert(skb);
}
}
static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
rq->stats.drop_err++;
if (!rcd->fcs)
rq->stats.drop_fcs++;
rq->stats.drop_total++;
/*
* We do not unmap and chain the rx buffer to the skb.
* We basically pretend this buffer is not used and will be recycled
* by vmxnet3_rq_alloc_rx_buf()
*/
/*
* ctx->skb may be NULL if this is the first and the only one
* desc for the pkt
*/
if (ctx->skb)
dev_kfree_skb_irq(ctx->skb);
ctx->skb = NULL;
}
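/* Estimate the L2+L3+L4 header length of an aggregated (LRO) TCP packet so
 * that the rx path can derive gso_size when the device did not report an
 * MSS. Returns 0 when the headers cannot be parsed from the linear part of
 * the skb.
 */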
static u32
vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
union Vmxnet3_GenericDesc *gdesc)
{
u32 hlen, maplen;
union {
void *ptr;
struct ethhdr *eth;
struct vlan_ethhdr *veth;
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
struct tcphdr *tcp;
} hdr;
BUG_ON(gdesc->rcd.tcp == 0);
maplen = skb_headlen(skb);
if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen))
return 0;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
skb->protocol == cpu_to_be16(ETH_P_8021AD))
hlen = sizeof(struct vlan_ethhdr);
else
hlen = sizeof(struct ethhdr);
hdr.eth = eth_hdr(skb);
if (gdesc->rcd.v4) {
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) &&
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP));
hdr.ptr += hlen;
BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP);
hlen = hdr.ipv4->ihl << 2;
hdr.ptr += hdr.ipv4->ihl << 2;
} else if (gdesc->rcd.v6) {
BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) &&
hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6));
hdr.ptr += hlen;
/* Use an estimated value, since we also need to handle
 * the TSO case.
 */
if (hdr.ipv6->nexthdr != IPPROTO_TCP)
return sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
hlen = sizeof(struct ipv6hdr);
hdr.ptr += sizeof(struct ipv6hdr);
} else {
/* Non-IP pkt, don't estimate header length */
return 0;
}
if (hlen + sizeof(struct tcphdr) > maplen)
return 0;
return (hlen + (hdr.tcp->doff << 2));
}
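/* Rx completion loop: consume completion descriptors while their GEN bit
 * matches the completion ring's generation. An SOP completion starts a new
 * skb (taking the XDP or rx data ring shortcut when applicable), non-SOP
 * completions append page frags, and the EOP completion fills in checksum,
 * RSS hash and LRO/GSO metadata before passing the skb up via
 * napi_gro_receive() or netif_receive_skb(). After each completion the rx
 * ring is refilled, and the producer register is written (when
 * rq->shared->updateRxProd is set) only at multiples of 16 in next2fill.
 */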
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter, int quota)
{
u32 rxprod_reg[2] = {
adapter->rx_prod_offset, adapter->rx_prod2_offset
};
u32 num_pkts = 0;
bool skip_page_frags = false;
bool encap_lro = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
u16 segCnt = 0, mss = 0;
int comp_offset, fill_offset;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxCmdDesc;
struct Vmxnet3_RxCompDesc rxComp;
#endif
bool need_flush = false;
vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
struct sk_buff *skb, *new_skb = NULL;
struct page *new_page = NULL;
dma_addr_t new_dma_addr;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
struct vmxnet3_cmd_ring *ring = NULL;
if (num_pkts >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
*/
break;
}
/* Prevent any rcd field from being (speculatively) read before
* rcd->gen is read.
*/
dma_rmb();
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 &&
rcd->rqID != rq->dataRingQid);
idx = rcd->rxdIdx;
ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
BUG_ON(rxd->addr != rbi->dma_addr ||
rxd->len != rbi->len);
if (unlikely(rcd->eop && rcd->err)) {
vmxnet3_rx_error(rq, rcd, ctx, adapter);
goto rcd_done;
}
if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
struct sk_buff *skb_xdp_pass;
int act;
if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
ctx->skb = NULL;
goto skip_xdp; /* Handle it later. */
}
if (rbi->buf_type != VMXNET3_RX_BUF_XDP)
goto rcd_done;
act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
&skb_xdp_pass);
if (act == XDP_PASS) {
ctx->skb = skb_xdp_pass;
goto sop_done;
}
ctx->skb = NULL;
need_flush |= act == XDP_REDIRECT;
goto rcd_done;
}
skip_xdp:
if (rcd->sop) { /* first buf of the pkt */
bool rxDataRingUsed;
u16 len;
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
(rcd->rqID != rq->qid &&
rcd->rqID != rq->dataRingQid));
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB &&
rbi->buf_type != VMXNET3_RX_BUF_XDP);
BUG_ON(ctx->skb != NULL || rbi->skb == NULL);
if (unlikely(rcd->len == 0)) {
/* Pretend the rx buffer is skipped. */
BUG_ON(!(rcd->sop && rcd->eop));
netdev_dbg(adapter->netdev,
"rxRing[%u][%u] 0 length\n",
ring_idx, idx);
goto rcd_done;
}
skip_page_frags = false;
ctx->skb = rbi->skb;
rxDataRingUsed =
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
len = rxDataRingUsed ? rcd->len : rbi->len;
if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
struct sk_buff *skb_xdp_pass;
size_t sz;
int act;
sz = rcd->rxdIdx * rq->data_ring.desc_size;
act = vmxnet3_process_xdp_small(adapter, rq,
&rq->data_ring.base[sz],
rcd->len,
&skb_xdp_pass);
if (act == XDP_PASS) {
ctx->skb = skb_xdp_pass;
goto sop_done;
}
need_flush |= act == XDP_REDIRECT;
goto rcd_done;
}
new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
len);
if (new_skb == NULL) {
/* Skb allocation failed, do not hand over this
 * skb to the stack. Reuse it. Drop the existing pkt.
 */
rq->stats.rx_buf_alloc_failure++;
ctx->skb = NULL;
rq->stats.drop_total++;
skip_page_frags = true;
goto rcd_done;
}
if (rxDataRingUsed && adapter->rxdataring_enabled) {
size_t sz;
BUG_ON(rcd->len > rq->data_ring.desc_size);
ctx->skb = new_skb;
sz = rcd->rxdIdx * rq->data_ring.desc_size;
memcpy(new_skb->data,
&rq->data_ring.base[sz], rcd->len);
} else {
ctx->skb = rbi->skb;
new_dma_addr =
dma_map_single(&adapter->pdev->dev,
new_skb->data, rbi->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
new_dma_addr)) {
dev_kfree_skb(new_skb);
/* Skb allocation failed, do not
 * hand over this skb to the stack.
 * Reuse it. Drop the existing pkt.
 */
rq->stats.rx_buf_alloc_failure++;
ctx->skb = NULL;
rq->stats.drop_total++;
skip_page_frags = true;
goto rcd_done;
}
dma_unmap_single(&adapter->pdev->dev,
rbi->dma_addr,
rbi->len,
DMA_FROM_DEVICE);
/* Immediate refill */
rbi->skb = new_skb;
rbi->dma_addr = new_dma_addr;
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
}
skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
if (VMXNET3_VERSION_GE_2(adapter) &&
rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
struct Vmxnet3_RxCompDescExt *rcdlro;
union Vmxnet3_GenericDesc *gdesc;
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
gdesc = (union Vmxnet3_GenericDesc *)rcd;
segCnt = rcdlro->segCnt;
WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
encap_lro = (le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
} else {
segCnt = 0;
}
} else {
BUG_ON(ctx->skb == NULL && !skip_page_frags);
/* non-SOP buffers must be type 1 in most cases */
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
/* If an sop buffer was dropped, skip all
* following non-sop fragments. They will be reused.
*/
if (skip_page_frags)
goto rcd_done;
if (rcd->len) {
new_page = alloc_page(GFP_ATOMIC);
/* Replacement page frag could not be allocated.
* Reuse this page. Drop the pkt and free the
* skb which contained this page as a frag. Skip
* processing all the following non-sop frags.
*/
if (unlikely(!new_page)) {
rq->stats.rx_buf_alloc_failure++;
dev_kfree_skb(ctx->skb);
ctx->skb = NULL;
skip_page_frags = true;
goto rcd_done;
}
new_dma_addr = dma_map_page(&adapter->pdev->dev,
new_page,
0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
new_dma_addr)) {
put_page(new_page);
rq->stats.rx_buf_alloc_failure++;
dev_kfree_skb(ctx->skb);
ctx->skb = NULL;
skip_page_frags = true;
goto rcd_done;
}
dma_unmap_page(&adapter->pdev->dev,
rbi->dma_addr, rbi->len,
DMA_FROM_DEVICE);
vmxnet3_append_frag(ctx->skb, rcd, rbi);
/* Immediate refill */
rbi->page = new_page;
rbi->dma_addr = new_dma_addr;
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
}
}
sop_done:
skb = ctx->skb;
if (rcd->eop) {
u32 mtu = adapter->netdev->mtu;
skb->len += skb->data_len;
#ifdef VMXNET3_RSS
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
(adapter->netdev->features & NETIF_F_RXHASH)) {
enum pkt_hash_types hash_type;
switch (rcd->rssType) {
case VMXNET3_RCD_RSS_TYPE_IPV4:
case VMXNET3_RCD_RSS_TYPE_IPV6:
hash_type = PKT_HASH_TYPE_L3;
break;
case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
hash_type = PKT_HASH_TYPE_L4;
break;
default:
hash_type = PKT_HASH_TYPE_L3;
break;
}
skb_set_hash(skb,
le32_to_cpu(rcd->rssHash),
hash_type);
}
#endif
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
if ((!rcd->tcp && !encap_lro) ||
!(adapter->netdev->features & NETIF_F_LRO))
goto not_lro;
if (segCnt != 0 && mss != 0) {
skb_shinfo(skb)->gso_type = rcd->v4 ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_segs = segCnt;
} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
u32 hlen;
hlen = vmxnet3_get_hdr_len(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
if (hlen == 0)
goto not_lro;
skb_shinfo(skb)->gso_type =
rcd->v4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
if (segCnt != 0) {
skb_shinfo(skb)->gso_segs = segCnt;
skb_shinfo(skb)->gso_size =
DIV_ROUND_UP(skb->len -
hlen, segCnt);
} else {
skb_shinfo(skb)->gso_size = mtu - hlen;
}
}
not_lro:
if (unlikely(rcd->ts))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rcd->tci);
/* Use GRO callback if UPT is enabled */
if ((adapter->netdev->features & NETIF_F_LRO) &&
!rq->shared->updateRxProd)
netif_receive_skb(skb);
else
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
encap_lro = false;
num_pkts++;
}
rcd_done:
/* device may have skipped some rx descs */
ring = rq->rx_ring + ring_idx;
rbi->comp_state = VMXNET3_RXD_COMP_DONE;
comp_offset = vmxnet3_cmd_ring_desc_avail(ring);
fill_offset = (idx > ring->next2fill ? 0 : ring->size) +
idx - ring->next2fill - 1;
if (!ring->isOutOfOrder || fill_offset >= comp_offset)
ring->next2comp = idx;
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
/* Ensure that the writes to rxd->gen bits will be observed
* after all other writes to rxd objects.
*/
dma_wmb();
while (num_to_alloc) {
rbi = rq->buf_info[ring_idx] + ring->next2fill;
if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
goto refill_buf;
if (ring_idx == 0) {
/* ring0 Type1 buffers can get skipped; re-fill them */
if (rbi->buf_type != VMXNET3_RX_BUF_SKB)
goto refill_buf;
}
if (rbi->comp_state == VMXNET3_RXD_COMP_DONE) {
refill_buf:
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
&rxCmdDesc);
WARN_ON(!rxd->addr);
/* Recv desc is ready to be used by the device */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
rbi->comp_state = VMXNET3_RXD_COMP_PENDING;
num_to_alloc--;
} else {
/* rx completion hasn't occurred */
ring->isOutOfOrder = 1;
break;
}
}
if (num_to_alloc == 0) {
ring->isOutOfOrder = 0;
}
/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd) && (ring->next2fill & 0xf) == 0) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
vmxnet3_getRxComp(rcd,
&rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
}
if (need_flush)
xdp_do_flush();
return num_pkts;
}
static void
vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
u32 i, ring_idx;
struct Vmxnet3_RxDesc *rxd;
/* ring has already been cleaned up */
if (!rq->rx_ring[0].base)
return;
for (ring_idx = 0; ring_idx < 2; ring_idx++) {
for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
struct vmxnet3_rx_buf_info *rbi;
#ifdef __BIG_ENDIAN_BITFIELD
struct Vmxnet3_RxDesc rxDesc;
#endif
rbi = &rq->buf_info[ring_idx][i];
vmxnet3_getRxDesc(rxd,
&rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rbi->page && rbi->buf_type == VMXNET3_RX_BUF_XDP) {
page_pool_recycle_direct(rq->page_pool,
rbi->page);
rbi->page = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rbi->skb) {
dma_unmap_single(&adapter->pdev->dev, rxd->addr,
rxd->len, DMA_FROM_DEVICE);
dev_kfree_skb(rbi->skb);
rbi->skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rbi->page) {
dma_unmap_page(&adapter->pdev->dev, rxd->addr,
rxd->len, DMA_FROM_DEVICE);
put_page(rbi->page);
rbi->page = NULL;
}
}
rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
rq->rx_ring[ring_idx].next2fill =
rq->rx_ring[ring_idx].next2comp = 0;
}
rq->comp_ring.gen = VMXNET3_INIT_GEN;
rq->comp_ring.next2proc = 0;
}
static void
vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
}
static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i;
int j;
/* all rx buffers must have already been freed */
for (i = 0; i < 2; i++) {
if (rq->buf_info[i]) {
for (j = 0; j < rq->rx_ring[i].size; j++)
BUG_ON(rq->buf_info[i][j].page != NULL);
}
}
for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[i].size
* sizeof(struct Vmxnet3_RxDesc),
rq->rx_ring[i].base,
rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
rq->page_pool = NULL;
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->data_ring.desc_size,
rq->data_ring.base, rq->data_ring.basePA);
rq->data_ring.base = NULL;
}
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL;
}
kfree(rq->buf_info[0]);
rq->buf_info[0] = NULL;
rq->buf_info[1] = NULL;
}
static void
vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
if (rq->data_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
(rq->rx_ring[0].size *
rq->data_ring.desc_size),
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
rq->data_ring.desc_size = 0;
}
}
}
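/* Initialize an rx queue for use: every rx_buf_per_pkt-th slot in ring 0 is
 * a head buffer (an skb, or an XDP page pool buffer when XDP is enabled);
 * the slots in between and all of ring 1 are PAGE_SIZE body buffers. Both
 * rings are then zeroed, pre-filled (leaving one slot free), and the
 * completion ring and rx context are reset.
 */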
static int
vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
struct vmxnet3_adapter *adapter)
{
int i, err;
/* initialize buf_info */
for (i = 0; i < rq->rx_ring[0].size; i++) {
/* 1st buf for a pkt is skbuff or xdp page */
if (i % adapter->rx_buf_per_pkt == 0) {
rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
VMXNET3_RX_BUF_XDP :
VMXNET3_RX_BUF_SKB;
rq->buf_info[0][i].len = adapter->skb_buf_size;
} else { /* subsequent bufs for a pkt are frags */
rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[0][i].len = PAGE_SIZE;
}
}
for (i = 0; i < rq->rx_ring[1].size; i++) {
rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
rq->buf_info[1][i].len = PAGE_SIZE;
}
/* reset internal state and allocate buffers for both rings */
for (i = 0; i < 2; i++) {
rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
sizeof(struct Vmxnet3_RxDesc));
rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
rq->rx_ring[i].isOutOfOrder = 0;
}
err = vmxnet3_create_pp(adapter, rq,
rq->rx_ring[0].size + rq->rx_ring[1].size);
if (err)
return err;
if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
adapter) == 0) {
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
rq->page_pool = NULL;
/* we need at least 1 rx buffer for the 1st ring */
return -ENOMEM;
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
sizeof(struct Vmxnet3_RxCompDesc));
rq->comp_ring.gen = VMXNET3_INIT_GEN;
/* reset rxctx */
rq->rx_ctx.skb = NULL;
/* stats are not reset */
return 0;
}
static int
vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
dev_err(&adapter->netdev->dev, "%s: failed to "
"initialize rx queue%i\n",
adapter->netdev->name, i);
break;
}
}
return err;
}
static int
vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
{
int i;
size_t sz;
struct vmxnet3_rx_buf_info *bi;
for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
rq->rx_ring[i].base = dma_alloc_coherent(
&adapter->pdev->dev, sz,
&rq->rx_ring[i].basePA,
GFP_KERNEL);
if (!rq->rx_ring[i].base) {
netdev_err(adapter->netdev,
"failed to allocate rx ring %d\n", i);
goto err;
}
}
if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
sz = rq->rx_ring[0].size * rq->data_ring.desc_size;
rq->data_ring.base =
dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->data_ring.basePA,
GFP_KERNEL);
if (!rq->data_ring.base) {
netdev_err(adapter->netdev,
"rx data ring will be disabled\n");
adapter->rxdataring_enabled = false;
}
} else {
rq->data_ring.base = NULL;
rq->data_ring.desc_size = 0;
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
GFP_KERNEL);
if (!rq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err;
}
bi = kcalloc_node(rq->rx_ring[0].size + rq->rx_ring[1].size,
sizeof(rq->buf_info[0][0]), GFP_KERNEL,
dev_to_node(&adapter->pdev->dev));
if (!bi)
goto err;
rq->buf_info[0] = bi;
rq->buf_info[1] = bi + rq->rx_ring[0].size;
return 0;
err:
vmxnet3_rq_destroy(rq, adapter);
return -ENOMEM;
}
int
vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
{
int i, err = 0;
adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
if (unlikely(err)) {
dev_err(&adapter->netdev->dev,
"%s: failed to create rx queue%i\n",
adapter->netdev->name, i);
goto err_out;
}
}
if (!adapter->rxdataring_enabled)
vmxnet3_rq_destroy_all_rxdataring(adapter);
return err;
err_out:
vmxnet3_rq_destroy_all(adapter);
return err;
}
/* Multiple queue aware polling function for tx and rx */
static int
vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
{
int rcd_done = 0, i;
if (unlikely(adapter->shared->ecr))
vmxnet3_process_events(adapter);
for (i = 0; i < adapter->num_tx_queues; i++)
vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
adapter, budget);
return rcd_done;
}
static int
vmxnet3_poll(struct napi_struct *napi, int budget)
{
struct vmxnet3_rx_queue *rx_queue = container_of(napi,
struct vmxnet3_rx_queue, napi);
int rxd_done;
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
napi_complete_done(napi, rxd_done);
vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
}
/*
* NAPI polling function for MSI-X mode with multiple Rx queues
* Returns the # of the NAPI credit consumed (# of rx descriptors processed)
*/
static int
vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
{
struct vmxnet3_rx_queue *rq = container_of(napi,
struct vmxnet3_rx_queue, napi);
struct vmxnet3_adapter *adapter = rq->adapter;
int rxd_done;
/* When sharing interrupt with corresponding tx queue, process
* tx completions in that queue as well
*/
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
struct vmxnet3_tx_queue *tq =
&adapter->tx_queue[rq - adapter->rx_queue];
vmxnet3_tq_tx_complete(tq, adapter);
}
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
napi_complete_done(napi, rxd_done);
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
}
#ifdef CONFIG_PCI_MSI
/*
* Handle completion interrupts on tx queues
* Returns whether or not the intr is handled
*/
static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)
{
struct vmxnet3_tx_queue *tq = data;
struct vmxnet3_adapter *adapter = tq->adapter;
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
/* Handle the case where only one irq is allocated for all tx queues */
if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
int i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
vmxnet3_tq_tx_complete(txq, adapter);
}
} else {
vmxnet3_tq_tx_complete(tq, adapter);
}
vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
return IRQ_HANDLED;
}
/*
* Handle completion interrupts on rx queues. Returns whether or not the
* intr is handled
*/
static irqreturn_t
vmxnet3_msix_rx(int irq, void *data)
{
struct vmxnet3_rx_queue *rq = data;
struct vmxnet3_adapter *adapter = rq->adapter;
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
napi_schedule(&rq->napi);
return IRQ_HANDLED;
}
/*
*----------------------------------------------------------------------------
*
* vmxnet3_msix_event --
*
* vmxnet3 msix event intr handler
*
* Result:
* whether or not the intr is handled
*
*----------------------------------------------------------------------------
*/
static irqreturn_t
vmxnet3_msix_event(int irq, void *data)
{
struct net_device *dev = data;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
if (adapter->shared->ecr)
vmxnet3_process_events(adapter);
vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
/* Interrupt handler for vmxnet3 */
static irqreturn_t
vmxnet3_intr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vmxnet3_adapter *adapter = netdev_priv(dev);
if (adapter->intr.type == VMXNET3_IT_INTX) {
u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
if (unlikely(icr == 0))
/* not ours */
return IRQ_NONE;
}
/* disable intr if needed */
if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
vmxnet3_disable_all_intrs(adapter);
napi_schedule(&adapter->rx_queue[0].napi);
return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll callback. */
static void
vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (adapter->intr.type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX: {
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
break;
}
#endif
case VMXNET3_IT_MSI:
default:
vmxnet3_intr(0, adapter->netdev);
break;
}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
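/* Request interrupts according to the negotiated mode. For MSI-X the vector
 * layout is: one vector per tx queue (unless TXSHARE collapses them into a
 * single vector, or BUDDYSHARE reuses the rx vectors), followed by one
 * vector per rx queue, and finally one vector for device events. MSI and
 * INTx fall back to the single vmxnet3_intr() handler and one rx queue.
 */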
static int
vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
{
struct vmxnet3_intr *intr = &adapter->intr;
int err = 0, i;
int vector = 0;
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
for (i = 0; i < adapter->num_tx_queues; i++) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
adapter->netdev->name, vector);
err = request_irq(
intr->msix_entries[vector].vector,
vmxnet3_msix_tx, 0,
adapter->tx_queue[i].name,
&adapter->tx_queue[i]);
} else {
sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
adapter->netdev->name, vector);
}
if (err) {
dev_err(&adapter->netdev->dev,
"Failed to request irq for MSIX, %s, "
"error %d\n",
adapter->tx_queue[i].name, err);
return err;
}
/* Handle the case where only 1 MSIx was allocated for
* all tx queues */
if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
for (; i < adapter->num_tx_queues; i++)
adapter->tx_queue[i].comp_ring.intr_idx
= vector;
vector++;
break;
} else {
adapter->tx_queue[i].comp_ring.intr_idx
= vector++;
}
}
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
vector = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
adapter->netdev->name, vector);
else
sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
adapter->netdev->name, vector);
err = request_irq(intr->msix_entries[vector].vector,
vmxnet3_msix_rx, 0,
adapter->rx_queue[i].name,
&(adapter->rx_queue[i]));
if (err) {
netdev_err(adapter->netdev,
"Failed to request irq for MSIX, "
"%s, error %d\n",
adapter->rx_queue[i].name, err);
return err;
}
adapter->rx_queue[i].comp_ring.intr_idx = vector++;
}
sprintf(intr->event_msi_vector_name, "%s-event-%d",
adapter->netdev->name, vector);
err = request_irq(intr->msix_entries[vector].vector,
vmxnet3_msix_event, 0,
intr->event_msi_vector_name, adapter->netdev);
intr->event_intr_idx = vector;
} else if (intr->type == VMXNET3_IT_MSI) {
adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
adapter->netdev->name, adapter->netdev);
} else {
#endif
adapter->num_rx_queues = 1;
err = request_irq(adapter->pdev->irq, vmxnet3_intr,
IRQF_SHARED, adapter->netdev->name,
adapter->netdev);
#ifdef CONFIG_PCI_MSI
}
#endif
intr->num_intrs = vector + 1;
if (err) {
netdev_err(adapter->netdev,
"Failed to request irq (intr type:%d), error %d\n",
intr->type, err);
} else {
/* Number of rx queues will not change after this */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq->qid = i;
rq->qid2 = i + adapter->num_rx_queues;
rq->dataRingQid = i + 2 * adapter->num_rx_queues;
}
/* init our intr settings */
for (i = 0; i < intr->num_intrs; i++)
intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
if (adapter->intr.type != VMXNET3_IT_MSIX) {
adapter->intr.event_intr_idx = 0;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_queue[i].comp_ring.intr_idx = 0;
adapter->rx_queue[0].comp_ring.intr_idx = 0;
}
netdev_info(adapter->netdev,
"intr type %u, mode %u, %u vectors allocated\n",
intr->type, intr->mask_mode, intr->num_intrs);
}
return err;
}
static void
vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
{
struct vmxnet3_intr *intr = &adapter->intr;
BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
switch (intr->type) {
#ifdef CONFIG_PCI_MSI
case VMXNET3_IT_MSIX:
{
int i, vector = 0;
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
for (i = 0; i < adapter->num_tx_queues; i++) {
free_irq(intr->msix_entries[vector++].vector,
&(adapter->tx_queue[i]));
if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
break;
}
}
for (i = 0; i < adapter->num_rx_queues; i++) {
free_irq(intr->msix_entries[vector++].vector,
&(adapter->rx_queue[i]));
}
free_irq(intr->msix_entries[vector].vector,
adapter->netdev);
BUG_ON(vector >= intr->num_intrs);
break;
}
#endif
case VMXNET3_IT_MSI:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
case VMXNET3_IT_INTX:
free_irq(adapter->pdev->irq, adapter->netdev);
break;
default:
BUG();
}
}
static void
vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
{
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
u16 vid;
/* allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
}
static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!(netdev->flags & IFF_PROMISC)) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
unsigned long flags;
VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
set_bit(vid, adapter->active_vlans);
return 0;
}
static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!(netdev->flags & IFF_PROMISC)) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
unsigned long flags;
VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
clear_bit(vid, adapter->active_vlans);
return 0;
}
static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
struct netdev_hw_addr *ha;
int i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(buf + i++ * ETH_ALEN, ha->addr,
ETH_ALEN);
}
}
return buf;
}
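/* Rebuild the device rx filter mode: unicast is always accepted, promiscuous
 * mode clears the VLAN filter table, broadcast/all-multicast follow the
 * netdev flags, and an explicit multicast list is copied and DMA-mapped into
 * a table whose address is handed to the device; if copying or mapping the
 * table fails, the driver falls back to ALL_MULTI.
 */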
static void
vmxnet3_set_mc(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
new_mode |= VMXNET3_RXM_PROMISC;
} else {
vmxnet3_restore_vlan(adapter);
}
if (netdev->flags & IFF_BROADCAST)
new_mode |= VMXNET3_RXM_BCAST;
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
rxConf->mfTableLen = cpu_to_le16(sz);
new_table_pa = dma_map_single(
&adapter->pdev->dev,
new_table,
sz,
DMA_TO_DEVICE);
if (!dma_mapping_error(&adapter->pdev->dev,
new_table_pa)) {
new_mode |= VMXNET3_RXM_MCAST;
new_table_pa_valid = true;
rxConf->mfTablePA = cpu_to_le64(
new_table_pa);
}
}
if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
}
}
if (!(new_mode & VMXNET3_RXM_MCAST)) {
rxConf->mfTableLen = 0;
rxConf->mfTablePA = 0;
}
spin_lock_irqsave(&adapter->cmd_lock, flags);
if (new_mode != rxConf->rxMode) {
rxConf->rxMode = cpu_to_le32(new_mode);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RX_MODE);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, DMA_TO_DEVICE);
kfree(new_table);
}
void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
{
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
}
/*
* Set up driver_shared based on settings in adapter.
*/
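/* The shared area filled in below is the driver-to-device configuration
 * block: driver and guest OS identification, feature flags (uptFeatures),
 * MTU, the physical addresses and sizes of every tx/rx/completion ring,
 * optional RSS configuration and the interrupt settings. The device is
 * expected to consume it when the driver activates the device.
 */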
static void
vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
int i;
memset(shared, 0, sizeof(*shared));
/* driver settings */
shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
devRead->misc.driverInfo.version = cpu_to_le32(
VMXNET3_DRIVER_VERSION_NUM);
devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
*((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
*((u32 *)&devRead->misc.driverInfo.gos));
devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */
if (adapter->netdev->features & NETIF_F_RXCSUM)
devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->netdev->features & NETIF_F_LRO) {
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM))
devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
devRead->misc.queueDescLen = cpu_to_le32(
adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
/* tx queue settings */
devRead->misc.numTxQueues = adapter->num_tx_queues;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
tqc = &adapter->tqd_start[i].conf;
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
tqc->ddPA = cpu_to_le64(~0ULL);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(0);
tqc->intrIdx = tq->comp_ring.intr_idx;
}
/* rx queue settings */
devRead->misc.numRxQueues = adapter->num_rx_queues;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rqc = &adapter->rqd_start[i].conf;
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
rqc->ddPA = cpu_to_le64(~0ULL);
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
rqc->ddLen = cpu_to_le32(0);
rqc->intrIdx = rq->comp_ring.intr_idx;
if (VMXNET3_VERSION_GE_3(adapter)) {
rqc->rxDataRingBasePA =
cpu_to_le64(rq->data_ring.basePA);
rqc->rxDataRingDescSize =
cpu_to_le16(rq->data_ring.desc_size);
}
}
#ifdef VMXNET3_RSS
memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
if (adapter->rss) {
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
devRead->misc.uptFeatures |= UPT1_F_RSS;
devRead->misc.numRxQueues = adapter->num_rx_queues;
rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
UPT1_RSS_HASH_TYPE_IPV4 |
UPT1_RSS_HASH_TYPE_TCP_IPV6 |
UPT1_RSS_HASH_TYPE_IPV6;
rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
netdev_rss_key_fill(rssConf->hashKey, sizeof(rssConf->hashKey));
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = ethtool_rxfh_indir_default(
i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
devRead->rssConfDesc.confPA =
cpu_to_le64(adapter->rss_conf_pa);
}
#endif /* VMXNET3_RSS */
/* intr settings */
if (!VMXNET3_VERSION_GE_6(adapter) ||
!adapter->queuesExtEnabled) {
devRead->intrConf.autoMask = adapter->intr.mask_mode ==
VMXNET3_IMM_AUTO;
devRead->intrConf.numIntrs = adapter->intr.num_intrs;
for (i = 0; i < adapter->intr.num_intrs; i++)
devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
} else {
devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
VMXNET3_IMM_AUTO;
devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
for (i = 0; i < adapter->intr.num_intrs; i++)
devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
devReadExt->intrConfExt.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
}
/* rx filter settings */
devRead->rxFilterConf.rxMode = 0;
vmxnet3_restore_vlan(adapter);
vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
/* the rest are already zeroed */
}
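/* Report the chosen rx ring buffer sizes to a version 7+ device. */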
static void
vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
if (!VMXNET3_VERSION_GE_7(adapter))
return;
cmdInfo->ringBufSize = adapter->ringBufSize;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_RING_BUFFER_SIZE);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
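/* Set up interrupt coalescing on a version 3+ device: query the default
* scheme or apply the user-configured one via the shared cmdInfo area.
*/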
static void
vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
if (!VMXNET3_VERSION_GE_3(adapter))
return;
spin_lock_irqsave(&adapter->cmd_lock, flags);
cmdInfo->varConf.confVer = 1;
cmdInfo->varConf.confLen =
cpu_to_le32(sizeof(*adapter->coal_conf));
cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
if (adapter->default_coal_mode) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_COALESCE);
} else {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_COALESCE);
}
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
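/* Configure which header fields a version 4+ device hashes for RSS. On
* version 7 the UDP/ESP RSS capability bits are refreshed first, and the
* value actually accepted by the device is read back and cached.
*/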
static void
vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
{
struct Vmxnet3_DriverShared *shared = adapter->shared;
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
if (!VMXNET3_VERSION_GE_4(adapter))
return;
spin_lock_irqsave(&adapter->cmd_lock, flags);
if (adapter->default_rss_fields) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_RSS_FIELDS);
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
} else {
if (VMXNET3_VERSION_GE_7(adapter)) {
if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_UDP_RSS)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
}
if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_ESP_RSS_IPV4)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
}
if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_ESP_RSS_IPV6)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
}
cmdInfo->setRssFields = adapter->rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_RSS_FIELDS);
/* Not all requested RSS may get applied, so get and
* cache what was actually applied.
*/
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_RSS_FIELDS);
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
}
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
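/* Bring the device up: initialize the tx/rx rings, request irqs, populate
* the shared area, issue ACTIVATE_DEV, prime the rx producer registers,
* apply the rx filters and finally enable NAPI and interrupts.
*/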
int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
{
int err, i;
u32 ret;
unsigned long flags;
netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
" ring sizes %u %u %u\n", adapter->netdev->name,
adapter->skb_buf_size, adapter->rx_buf_per_pkt,
adapter->tx_queue[0].tx_ring.size,
adapter->rx_queue[0].rx_ring[0].size,
adapter->rx_queue[0].rx_ring[1].size);
vmxnet3_tq_init_all(adapter);
err = vmxnet3_rq_init_all(adapter);
if (err) {
netdev_err(adapter->netdev,
"Failed to init rx queue error %d\n", err);
goto rq_err;
}
err = vmxnet3_request_irqs(adapter);
if (err) {
netdev_err(adapter->netdev,
"Failed to setup irq for error %d\n", err);
goto irq_err;
}
vmxnet3_setup_driver_shared(adapter);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
adapter->shared_pa));
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
adapter->shared_pa));
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_ACTIVATE_DEV);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret != 0) {
netdev_err(adapter->netdev,
"Failed to activate dev: error %u\n", ret);
err = -EINVAL;
goto activate_err;
}
vmxnet3_init_bufsize(adapter);
vmxnet3_init_coalesce(adapter);
vmxnet3_init_rssfields(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
VMXNET3_WRITE_BAR0_REG(adapter,
adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
adapter->rx_queue[i].rx_ring[0].next2fill);
VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
(i * VMXNET3_REG_ALIGN)),
adapter->rx_queue[i].rx_ring[1].next2fill);
}
/* Apply the rx filter settings last. */
vmxnet3_set_mc(adapter->netdev);
/*
* Check link state when first activating device. It will start the
* tx queue if the link is up.
*/
vmxnet3_check_link(adapter, true);
netif_tx_wake_all_queues(adapter->netdev);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
vmxnet3_enable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
return 0;
activate_err:
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
vmxnet3_free_irqs(adapter);
irq_err:
rq_err:
/* free up buffers we allocated */
vmxnet3_rq_cleanup_all(adapter);
return err;
}
void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
{
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
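/* Quiesce the device unless it already is: tell it to stop, disable
* interrupts and NAPI, stop tx, and release the queue buffers and irqs.
*/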
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
{
int i;
unsigned long flags;
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
return 0;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
napi_disable(&adapter->rx_queue[i].napi);
netif_tx_disable(adapter->netdev);
adapter->link_speed = 0;
netif_carrier_off(adapter->netdev);
vmxnet3_tq_cleanup_all(adapter);
vmxnet3_rq_cleanup_all(adapter);
vmxnet3_free_irqs(adapter);
return 0;
}
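/* Program the MAC address: low 4 bytes into MACL, upper 2 bytes into MACH. */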
static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
{
u32 tmp;
tmp = *(u32 *)mac;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
tmp = (mac[5] << 8) | mac[4];
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
}
static int
vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
dev_addr_set(netdev, addr->sa_data);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
}
/* ==================== initialization and cleanup routines ============ */
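/* Enable the PCI device and map BAR0 (doorbell/producer registers) and
* BAR1 (control and command registers).
*/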
static int
vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
{
int err;
unsigned long mmio_start, mmio_len;
struct pci_dev *pdev = adapter->pdev;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
return err;
}
err = pci_request_selected_regions(pdev, (1 << 2) - 1,
vmxnet3_driver_name);
if (err) {
dev_err(&pdev->dev,
"Failed to request region for adapter: error %d\n", err);
goto err_enable_device;
}
pci_set_master(pdev);
mmio_start = pci_resource_start(pdev, 0);
mmio_len = pci_resource_len(pdev, 0);
adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr0) {
dev_err(&pdev->dev, "Failed to map bar0\n");
err = -EIO;
goto err_ioremap;
}
mmio_start = pci_resource_start(pdev, 1);
mmio_len = pci_resource_len(pdev, 1);
adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
if (!adapter->hw_addr1) {
dev_err(&pdev->dev, "Failed to map bar1\n");
err = -EIO;
goto err_bar1;
}
return 0;
err_bar1:
iounmap(adapter->hw_addr0);
err_ioremap:
pci_release_selected_regions(pdev, (1 << 2) - 1);
err_enable_device:
pci_disable_device(pdev);
return err;
}
static void
vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
{
BUG_ON(!adapter->pdev);
iounmap(adapter->hw_addr0);
iounmap(adapter->hw_addr1);
pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
pci_disable_device(adapter->pdev);
}
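/* Choose skb_buf_size and rx_buf_per_pkt for the current MTU and round the
* rx ring sizes to the required alignment (a power of two on version 7+),
* then propagate the sizes to every rx queue.
*/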
void
vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
{
size_t sz, i, ring0_size, ring1_size, comp_size;
/* With version7 ring1 will have only T0 buffers */
if (!VMXNET3_VERSION_GE_7(adapter)) {
if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
VMXNET3_MAX_ETH_HDR_SIZE) {
adapter->skb_buf_size = adapter->netdev->mtu +
VMXNET3_MAX_ETH_HDR_SIZE;
if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
adapter->rx_buf_per_pkt = 1;
} else {
adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
VMXNET3_MAX_ETH_HDR_SIZE;
adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
}
} else {
adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
VMXNET3_MAX_SKB_BUF_SIZE);
adapter->rx_buf_per_pkt = 1;
adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
adapter->ringBufSize.ring1BufSizeType1 = 0;
adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
}
/*
* for simplicity, force the ring0 size to be a multiple of
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
ring0_size = adapter->rx_queue[0].rx_ring[0].size;
ring0_size = (ring0_size + sz - 1) / sz * sz;
ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
sz * sz);
ring1_size = adapter->rx_queue[0].rx_ring[1].size;
ring1_size = (ring1_size + sz - 1) / sz * sz;
ring1_size = min_t(u32, ring1_size, VMXNET3_RX_RING2_MAX_SIZE /
sz * sz);
/* For v7 and later, keep ring size power of 2 for UPT */
if (VMXNET3_VERSION_GE_7(adapter)) {
ring0_size = rounddown_pow_of_two(ring0_size);
ring1_size = rounddown_pow_of_two(ring1_size);
}
comp_size = ring0_size + ring1_size;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
rq->rx_ring[0].size = ring0_size;
rq->rx_ring[1].size = ring1_size;
rq->comp_ring.size = comp_size;
}
}
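/* Size and create all tx and rx rings. A tx queue failure is fatal; if only
* some rx queues can be created, the driver continues with that many.
*/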
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
u32 rx_ring_size, u32 rx_ring2_size,
u16 txdata_desc_size, u16 rxdata_desc_size)
{
int err = 0, i;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
tq->tx_ring.size = tx_ring_size;
tq->data_ring.size = tx_ring_size;
tq->comp_ring.size = tx_ring_size;
tq->txdata_desc_size = txdata_desc_size;
tq->shared = &adapter->tqd_start[i].ctrl;
tq->stopped = true;
tq->adapter = adapter;
tq->qid = i;
err = vmxnet3_tq_create(tq, adapter);
/*
* Too late to change num_tx_queues. We cannot fall back to
* fewer queues than we asked for.
*/
if (err)
goto queue_err;
}
adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
vmxnet3_adjust_rx_ring_size(adapter);
adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
/* qid and qid2 for rx queues will be assigned later when num
* of rx queues is finalized after allocating intrs */
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
rq->data_ring.desc_size = rxdata_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
netdev_err(adapter->netdev,
"Could not allocate any rx queues. "
"Aborting.\n");
goto queue_err;
} else {
netdev_info(adapter->netdev,
"Number of rx queues changed "
"to : %d.\n", i);
adapter->num_rx_queues = i;
err = 0;
break;
}
}
}
if (!adapter->rxdataring_enabled)
vmxnet3_rq_destroy_all_rxdataring(adapter);
return err;
queue_err:
vmxnet3_tq_destroy_all(adapter);
return err;
}
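/* ndo_open: query the tx data descriptor size on version 3+, create the
* queues and activate the device.
*/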
static int
vmxnet3_open(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter;
int err, i;
adapter = netdev_priv(netdev);
for (i = 0; i < adapter->num_tx_queues; i++)
spin_lock_init(&adapter->tx_queue[i].tx_lock);
if (VMXNET3_VERSION_GE_3(adapter)) {
unsigned long flags;
u16 txdata_desc_size;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
(txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
(txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
adapter->txdata_desc_size =
sizeof(struct Vmxnet3_TxDataDesc);
} else {
adapter->txdata_desc_size = txdata_desc_size;
}
} else {
adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
}
err = vmxnet3_create_queues(adapter,
adapter->tx_ring_size,
adapter->rx_ring_size,
adapter->rx_ring2_size,
adapter->txdata_desc_size,
adapter->rxdata_desc_size);
if (err)
goto queue_err;
err = vmxnet3_activate_dev(adapter);
if (err)
goto activate_err;
return 0;
activate_err:
vmxnet3_rq_destroy_all(adapter);
vmxnet3_tq_destroy_all(adapter);
queue_err:
return err;
}
static int
vmxnet3_close(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
usleep_range(1000, 2000);
vmxnet3_quiesce_dev(adapter);
vmxnet3_rq_destroy_all(adapter);
vmxnet3_tq_destroy_all(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
return 0;
}
void
vmxnet3_force_close(struct vmxnet3_adapter *adapter)
{
int i;
/*
* we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
* vmxnet3_close() will deadlock.
*/
BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
/*
* Need to clear the quiesce bit to ensure that vmxnet3_close
* can quiesce the device properly
*/
clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
dev_close(adapter->netdev);
}
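/* ndo_change_mtu: if the interface is running, quiesce and reset the device,
* rebuild the rx rings for the new MTU and re-activate it; on failure the
* device is force-closed.
*/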
static int
vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
netdev->mtu = new_mtu;
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* we need to re-create the rx queue based on the new mtu */
vmxnet3_rq_destroy_all(adapter);
vmxnet3_adjust_rx_ring_size(adapter);
err = vmxnet3_rq_create_all(adapter);
if (err) {
netdev_err(netdev,
"failed to re-create rx queues, "
" error %d. Closing it.\n", err);
goto out;
}
err = vmxnet3_activate_dev(adapter);
if (err) {
netdev_err(netdev,
"failed to re-activate, error %d. "
"Closing it\n", err);
goto out;
}
}
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
if (err)
vmxnet3_force_close(adapter);
return err;
}
static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO | NETIF_F_HIGHDMA;
if (VMXNET3_VERSION_GE_4(adapter)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_TSO)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_TSO)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
}
if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
}
netdev->vlan_features = netdev->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
}
static void
vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
{
u32 tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
*(u32 *)mac = tmp;
tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
mac[4] = tmp & 0xff;
mac[5] = (tmp >> 8) & 0xff;
}
#ifdef CONFIG_PCI_MSI
/*
* Enable MSIx vectors.
* Returns :
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
* were enabled.
* number of vectors which were enabled otherwise (this number is greater
* than VMXNET3_LINUX_MIN_MSIX_VECT)
*/
static int
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
{
int ret = pci_enable_msix_range(adapter->pdev,
adapter->intr.msix_entries, nvec, nvec);
if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
dev_err(&adapter->netdev->dev,
"Failed to enable %d MSI-X, trying %d\n",
nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
ret = pci_enable_msix_range(adapter->pdev,
adapter->intr.msix_entries,
VMXNET3_LINUX_MIN_MSIX_VECT,
VMXNET3_LINUX_MIN_MSIX_VECT);
}
if (ret < 0) {
dev_err(&adapter->netdev->dev,
"Failed to enable MSI-X, error: %d\n", ret);
}
return ret;
}
#endif /* CONFIG_PCI_MSI */
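/* Read the interrupt configuration recommended by the device and try MSI-X
* first, then MSI, then INTx, reducing the number of rx queues when fewer
* vectors are available.
*/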
static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
u32 cfg;
unsigned long flags;
/* intr settings */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
if (adapter->intr.type == VMXNET3_IT_AUTO) {
adapter->intr.type = VMXNET3_IT_MSIX;
}
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
int i, nvec, nvec_allocated;
nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
1 : adapter->num_tx_queues;
nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
0 : adapter->num_rx_queues;
nvec += 1; /* for link event */
nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
for (i = 0; i < nvec; i++)
adapter->intr.msix_entries[i].entry = i;
nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
if (nvec_allocated < 0)
goto msix_err;
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
netdev_err(adapter->netdev,
"Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
}
}
adapter->intr.num_intrs = nvec_allocated;
return;
msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
"Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
if (!pci_enable_msi(adapter->pdev)) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
}
}
#endif /* CONFIG_PCI_MSI */
adapter->num_rx_queues = 1;
dev_info(&adapter->netdev->dev,
"Using INTx interrupt, #Rx queues: 1.\n");
adapter->intr.type = VMXNET3_IT_INTX;
/* INT-X related setting */
adapter->intr.num_intrs = 1;
}
static void
vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
{
if (adapter->intr.type == VMXNET3_IT_MSIX)
pci_disable_msix(adapter->pdev);
else if (adapter->intr.type == VMXNET3_IT_MSI)
pci_disable_msi(adapter->pdev);
else
BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
}
static void
vmxnet3_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
netdev_err(adapter->netdev, "tx hang\n");
schedule_work(&adapter->work);
}
static void
vmxnet3_reset_work(struct work_struct *data)
{
struct vmxnet3_adapter *adapter;
adapter = container_of(data, struct vmxnet3_adapter, work);
/* if another thread is resetting the device, no need to proceed */
if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
return;
/* if the device is closed, we must leave it alone */
rtnl_lock();
if (netif_running(adapter->netdev)) {
netdev_notice(adapter->netdev, "resetting\n");
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
vmxnet3_activate_dev(adapter);
} else {
netdev_info(adapter->netdev, "already closed\n");
}
rtnl_unlock();
netif_wake_queue(adapter->netdev);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
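/* PCI probe: allocate the netdev and shared DMA areas, negotiate the device
* and UPT revisions, size the queues, declare features, set up interrupts
* and register the netdev.
*/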
static int
vmxnet3_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct net_device_ops vmxnet3_netdev_ops = {
.ndo_open = vmxnet3_open,
.ndo_stop = vmxnet3_close,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
.ndo_fix_features = vmxnet3_fix_features,
.ndo_set_features = vmxnet3_set_features,
.ndo_features_check = vmxnet3_features_check,
.ndo_get_stats64 = vmxnet3_get_stats64,
.ndo_tx_timeout = vmxnet3_tx_timeout,
.ndo_set_rx_mode = vmxnet3_set_mc,
.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = vmxnet3_netpoll,
#endif
.ndo_bpf = vmxnet3_xdp,
.ndo_xdp_xmit = vmxnet3_xdp_xmit,
};
int err;
u32 ver;
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
int size;
int num_tx_queues;
int num_rx_queues;
int queues;
unsigned long flags;
if (!pci_msi_enabled())
enable_mq = 0;
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
(int)num_online_cpus());
else
#endif
num_rx_queues = 1;
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
(int)num_online_cpus());
else
num_tx_queues = 1;
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
if (!netdev)
return -ENOMEM;
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_set_mask;
}
spin_lock_init(&adapter->cmd_lock);
adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
sizeof(struct vmxnet3_adapter),
DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
dev_err(&pdev->dev, "Failed to map dma\n");
err = -EFAULT;
goto err_set_mask;
}
adapter->shared = dma_alloc_coherent(
&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
&adapter->shared_pa, GFP_KERNEL);
if (!adapter->shared) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_alloc_shared;
}
err = vmxnet3_alloc_pci_resources(adapter);
if (err < 0)
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
if (ver & (1 << VMXNET3_REV_7)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_7);
adapter->version = VMXNET3_REV_7 + 1;
} else if (ver & (1 << VMXNET3_REV_6)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_6);
adapter->version = VMXNET3_REV_6 + 1;
} else if (ver & (1 << VMXNET3_REV_5)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_5);
adapter->version = VMXNET3_REV_5 + 1;
} else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
adapter->version = VMXNET3_REV_4 + 1;
} else if (ver & (1 << VMXNET3_REV_3)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_3);
adapter->version = VMXNET3_REV_3 + 1;
} else if (ver & (1 << VMXNET3_REV_2)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_2);
adapter->version = VMXNET3_REV_2 + 1;
} else if (ver & (1 << VMXNET3_REV_1)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_1);
adapter->version = VMXNET3_REV_1 + 1;
} else {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
if (ver & 1) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
} else {
dev_err(&pdev->dev,
"Incompatible upt version (0x%x) for adapter\n", ver);
err = -EBUSY;
goto err_ver;
}
if (VMXNET3_VERSION_GE_7(adapter)) {
adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
adapter->dev_caps[0] = adapter->devcap_supported[0] &
(1UL << VMXNET3_CAP_LARGE_BAR);
}
if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
adapter->dev_caps[0] |= adapter->devcap_supported[0] &
(1UL << VMXNET3_CAP_OOORX_COMP);
}
if (adapter->dev_caps[0])
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
if (VMXNET3_VERSION_GE_7(adapter) &&
adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
} else {
adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
}
if (VMXNET3_VERSION_GE_6(adapter)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_MAX_QUEUES_CONF);
queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (queues > 0) {
adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
} else {
adapter->num_rx_queues = min(num_rx_queues,
VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
adapter->num_tx_queues = min(num_tx_queues,
VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
}
if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
adapter->queuesExtEnabled = true;
} else {
adapter->queuesExtEnabled = false;
}
} else {
adapter->queuesExtEnabled = false;
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
num_tx_queues = rounddown_pow_of_two(num_tx_queues);
adapter->num_rx_queues = min(num_rx_queues,
VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
adapter->num_tx_queues = min(num_tx_queues,
VMXNET3_DEVICE_DEFAULT_TX_QUEUES);
}
dev_info(&pdev->dev,
"# of Tx queues : %d, # of Rx queues : %d\n",
adapter->num_tx_queues, adapter->num_rx_queues);
adapter->rx_buf_per_pkt = 1;
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
&adapter->queue_desc_pa,
GFP_KERNEL);
if (!adapter->tqd_start) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
goto err_ver;
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_PMConf),
&adapter->pm_conf_pa,
GFP_KERNEL);
if (adapter->pm_conf == NULL) {
err = -ENOMEM;
goto err_alloc_pm;
}
#ifdef VMXNET3_RSS
adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct UPT1_RSSConf),
&adapter->rss_conf_pa,
GFP_KERNEL);
if (adapter->rss_conf == NULL) {
err = -ENOMEM;
goto err_alloc_rss;
}
#endif /* VMXNET3_RSS */
if (VMXNET3_VERSION_GE_3(adapter)) {
adapter->coal_conf =
dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_CoalesceScheme),
&adapter->coal_conf_pa,
GFP_KERNEL);
if (!adapter->coal_conf) {
err = -ENOMEM;
goto err_coal_conf;
}
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
adapter->default_coal_mode = true;
}
if (VMXNET3_VERSION_GE_4(adapter)) {
adapter->default_rss_fields = true;
adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
vmxnet3_declare_features(adapter);
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT;
adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
if (adapter->num_tx_queues == adapter->num_rx_queues)
adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
else
adapter->share_intr = VMXNET3_INTR_DONTSHARE;
vmxnet3_alloc_intr_resources(adapter);
#ifdef VMXNET3_RSS
if (adapter->num_rx_queues > 1 &&
adapter->intr.type == VMXNET3_IT_MSIX) {
adapter->rss = true;
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= NETIF_F_RXHASH;
dev_dbg(&pdev->dev, "RSS is enabled.\n");
} else {
adapter->rss = false;
}
#endif
vmxnet3_read_mac_addr(adapter, mac);
dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
/* MTU range: 60 - 9190 */
netdev->min_mtu = VMXNET3_MIN_MTU;
if (VMXNET3_VERSION_GE_6(adapter))
netdev->max_mtu = VMXNET3_V6_MAX_MTU;
else
netdev->max_mtu = VMXNET3_MAX_MTU;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
if (adapter->intr.type == VMXNET3_IT_MSIX) {
int i;
for (i = 0; i < adapter->num_rx_queues; i++) {
netif_napi_add(adapter->netdev,
&adapter->rx_queue[i].napi,
vmxnet3_poll_rx_only);
}
} else {
netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
vmxnet3_poll);
}
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
netif_carrier_off(netdev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register adapter\n");
goto err_register;
}
vmxnet3_check_link(adapter, false);
return 0;
err_register:
if (VMXNET3_VERSION_GE_3(adapter)) {
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_CoalesceScheme),
adapter->coal_conf, adapter->coal_conf_pa);
}
vmxnet3_free_intr_resources(adapter);
err_coal_conf:
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa);
err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
err_alloc_shared:
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
err_set_mask:
free_netdev(netdev);
return err;
}
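/* PCI remove: recompute the rx queue count used for the queue descriptor
* allocation, unregister the netdev and free all DMA and PCI resources.
*/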
static void
vmxnet3_remove_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int size = 0;
int num_rx_queues, rx_queues;
unsigned long flags;
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
(int)num_online_cpus());
else
#endif
num_rx_queues = 1;
if (!VMXNET3_VERSION_GE_6(adapter)) {
num_rx_queues = rounddown_pow_of_two(num_rx_queues);
}
if (VMXNET3_VERSION_GE_6(adapter)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_MAX_QUEUES_CONF);
rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (rx_queues > 0)
rx_queues = (rx_queues >> 8) & 0xff;
else
rx_queues = min(num_rx_queues, VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
num_rx_queues = min(num_rx_queues, rx_queues);
} else {
num_rx_queues = min(num_rx_queues,
VMXNET3_DEVICE_DEFAULT_RX_QUEUES);
}
cancel_work_sync(&adapter->work);
unregister_netdev(netdev);
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
if (VMXNET3_VERSION_GE_3(adapter)) {
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_CoalesceScheme),
adapter->coal_conf, adapter->coal_conf_pa);
}
#ifdef VMXNET3_RSS
dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
adapter->rss_conf, adapter->rss_conf_pa);
#endif
dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
adapter->pm_conf, adapter->pm_conf_pa);
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
adapter->queue_desc_pa);
dma_free_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_DriverShared),
adapter->shared, adapter->shared_pa);
dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
sizeof(struct vmxnet3_adapter), DMA_TO_DEVICE);
free_netdev(netdev);
}
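/* Shutdown hook: wait for a pending reset, then quiesce the device (if not
* already quiesced) and mask its interrupts; resources stay allocated.
*/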
static void vmxnet3_shutdown_device(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
/* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED,
&adapter->state)) {
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
return;
}
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_disable_all_intrs(adapter);
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
}
#ifdef CONFIG_PM
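/* Suspend hook: tear down interrupts, build the enabled wake-up filters
* (unicast pattern, ARP request for our IPv4 address, magic packet), push
* them with UPDATE_PMCFG and put the PCI device into its sleep state.
*/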
static int
vmxnet3_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_PMConf *pmConf;
struct ethhdr *ehdr;
struct arphdr *ahdr;
u8 *arpreq;
struct in_device *in_dev;
struct in_ifaddr *ifa;
unsigned long flags;
int i = 0;
if (!netif_running(netdev))
return 0;
for (i = 0; i < adapter->num_rx_queues; i++)
napi_disable(&adapter->rx_queue[i].napi);
vmxnet3_disable_all_intrs(adapter);
vmxnet3_free_irqs(adapter);
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
memset(pmConf, 0, sizeof(*pmConf));
if (adapter->wol & WAKE_UCAST) {
pmConf->filters[i].patternSize = ETH_ALEN;
pmConf->filters[i].maskSize = 1;
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
if (adapter->wol & WAKE_ARP) {
rcu_read_lock();
in_dev = __in_dev_get_rcu(netdev);
if (!in_dev) {
rcu_read_unlock();
goto skip_arp;
}
ifa = rcu_dereference(in_dev->ifa_list);
if (!ifa) {
rcu_read_unlock();
goto skip_arp;
}
pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/
sizeof(struct arphdr) + /* ARP header */
2 * ETH_ALEN + /* 2 Ethernet addresses*/
2 * sizeof(u32); /*2 IPv4 addresses */
pmConf->filters[i].maskSize =
(pmConf->filters[i].patternSize - 1) / 8 + 1;
/* ETH_P_ARP in Ethernet header. */
ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
ehdr->h_proto = htons(ETH_P_ARP);
/* ARPOP_REQUEST in ARP header. */
ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
ahdr->ar_op = htons(ARPOP_REQUEST);
arpreq = (u8 *)(ahdr + 1);
/* The Unicast IPv4 address in 'tip' field. */
arpreq += 2 * ETH_ALEN + sizeof(u32);
*(__be32 *)arpreq = ifa->ifa_address;
rcu_read_unlock();
/* The mask for the relevant bits. */
pmConf->filters[i].mask[0] = 0x00;
pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
pmConf->filters[i].mask[3] = 0x00;
pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
adapter->shared->devRead.pmConfDesc.confPA =
cpu_to_le64(adapter->pm_conf_pa);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_PMCFG);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
adapter->wol);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
return 0;
}
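/* Resume hook: re-enable the PCI device, reallocate interrupt vectors and
* fully reset and re-activate the device, since its state is not preserved
* across suspend.
*/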
static int
vmxnet3_resume(struct device *device)
{
int err;
unsigned long flags;
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *netdev = pci_get_drvdata(pdev);
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!netif_running(netdev))
return 0;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
err = pci_enable_device_mem(pdev);
if (err != 0)
return err;
pci_enable_wake(pdev, PCI_D0, 0);
vmxnet3_alloc_intr_resources(adapter);
/* During hibernate and suspend, device has to be reinitialized as the
* device state need not be preserved.
*/
/* Need not check adapter state as other reset tasks cannot run during
* device resume.
*/
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_QUIESCE_DEV);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_tq_cleanup_all(adapter);
vmxnet3_rq_cleanup_all(adapter);
vmxnet3_reset_dev(adapter);
err = vmxnet3_activate_dev(adapter);
if (err != 0) {
netdev_err(netdev,
"failed to re-activate on resume, error: %d", err);
vmxnet3_force_close(adapter);
return err;
}
netif_device_attach(netdev);
return 0;
}
static const struct dev_pm_ops vmxnet3_pm_ops = {
.suspend = vmxnet3_suspend,
.resume = vmxnet3_resume,
.freeze = vmxnet3_suspend,
.restore = vmxnet3_resume,
};
#endif
static struct pci_driver vmxnet3_driver = {
.name = vmxnet3_driver_name,
.id_table = vmxnet3_pciid_table,
.probe = vmxnet3_probe_device,
.remove = vmxnet3_remove_device,
.shutdown = vmxnet3_shutdown_device,
#ifdef CONFIG_PM
.driver.pm = &vmxnet3_pm_ops,
#endif
};
static int __init
vmxnet3_init_module(void)
{
pr_info("%s - version %s\n", VMXNET3_DRIVER_DESC,
VMXNET3_DRIVER_VERSION_REPORT);
return pci_register_driver(&vmxnet3_driver);
}
module_init(vmxnet3_init_module);
static void
vmxnet3_exit_module(void)
{
pci_unregister_driver(&vmxnet3_driver);
}
module_exit(vmxnet3_exit_module);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);
|
linux-master
|
drivers/net/vmxnet3/vmxnet3_drv.c
|
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; version 2 of the License and no later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Maintained by: [email protected]
*
*/
#include "vmxnet3_int.h"
#include <net/vxlan.h>
#include <net/geneve.h>
#include "vmxnet3_xdp.h"
#define VXLAN_UDP_PORT 8472
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
int offset;
};
/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
/* description, offset */
{ "Tx Queue#", 0 },
{ " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
{ " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
{ " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
{ " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
{ " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
{ " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
{ " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
{ " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
{ " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) },
{ " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) },
};
/* per tq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_tq_driver_stats[] = {
/* description, offset */
{" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
drop_total) },
{ " too many frags", offsetof(struct vmxnet3_tq_driver_stats,
drop_too_many_frags) },
{ " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
drop_oversized_hdr) },
{ " hdr err", offsetof(struct vmxnet3_tq_driver_stats,
drop_hdr_inspect_err) },
{ " tso", offsetof(struct vmxnet3_tq_driver_stats,
drop_tso) },
{ " ring full", offsetof(struct vmxnet3_tq_driver_stats,
tx_ring_full) },
{ " pkts linearized", offsetof(struct vmxnet3_tq_driver_stats,
linearized) },
{ " hdr cloned", offsetof(struct vmxnet3_tq_driver_stats,
copy_skb_header) },
{ " giant hdr", offsetof(struct vmxnet3_tq_driver_stats,
oversized_hdr) },
{ " xdp xmit", offsetof(struct vmxnet3_tq_driver_stats,
xdp_xmit) },
{ " xdp xmit err", offsetof(struct vmxnet3_tq_driver_stats,
xdp_xmit_err) },
};
/* per rq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_rq_dev_stats[] = {
{ "Rx Queue#", 0 },
{ " LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) },
{ " LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) },
{ " ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
{ " ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
{ " mcast pkts rx", offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
{ " mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
{ " bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
{ " bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
{ " pkts rx OOB", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
{ " pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) },
};
/* per rq stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_rq_driver_stats[] = {
/* description, offset */
{ " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
drop_total) },
{ " err", offsetof(struct vmxnet3_rq_driver_stats,
drop_err) },
{ " fcs", offsetof(struct vmxnet3_rq_driver_stats,
drop_fcs) },
{ " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
rx_buf_alloc_failure) },
{ " xdp packets", offsetof(struct vmxnet3_rq_driver_stats,
xdp_packets) },
{ " xdp tx", offsetof(struct vmxnet3_rq_driver_stats,
xdp_tx) },
{ " xdp redirects", offsetof(struct vmxnet3_rq_driver_stats,
xdp_redirects) },
{ " xdp drops", offsetof(struct vmxnet3_rq_driver_stats,
xdp_drops) },
{ " xdp aborted", offsetof(struct vmxnet3_rq_driver_stats,
xdp_aborted) },
};
/* global stats maintained by the driver */
static const struct vmxnet3_stat_desc
vmxnet3_global_stats[] = {
/* description, offset */
{ "tx timeout count", offsetof(struct vmxnet3_adapter,
tx_timeout_count) }
};
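/* ndo_get_stats64: ask the device to refresh its counters (GET_STATS) and
* aggregate the per-queue device and driver statistics.
*/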
void
vmxnet3_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct vmxnet3_adapter *adapter;
struct vmxnet3_tq_driver_stats *drvTxStats;
struct vmxnet3_rq_driver_stats *drvRxStats;
struct UPT1_TxStats *devTxStats;
struct UPT1_RxStats *devRxStats;
unsigned long flags;
int i;
adapter = netdev_priv(netdev);
/* Collect the dev stats into the shared area */
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
for (i = 0; i < adapter->num_tx_queues; i++) {
devTxStats = &adapter->tqd_start[i].stats;
drvTxStats = &adapter->tx_queue[i].stats;
stats->tx_packets += devTxStats->ucastPktsTxOK +
devTxStats->mcastPktsTxOK +
devTxStats->bcastPktsTxOK;
stats->tx_bytes += devTxStats->ucastBytesTxOK +
devTxStats->mcastBytesTxOK +
devTxStats->bcastBytesTxOK;
stats->tx_errors += devTxStats->pktsTxError;
stats->tx_dropped += drvTxStats->drop_total;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
devRxStats = &adapter->rqd_start[i].stats;
drvRxStats = &adapter->rx_queue[i].stats;
stats->rx_packets += devRxStats->ucastPktsRxOK +
devRxStats->mcastPktsRxOK +
devRxStats->bcastPktsRxOK;
stats->rx_bytes += devRxStats->ucastBytesRxOK +
devRxStats->mcastBytesRxOK +
devRxStats->bcastBytesRxOK;
stats->rx_errors += devRxStats->pktsRxError;
stats->rx_dropped += drvRxStats->drop_total;
stats->multicast += devRxStats->mcastPktsRxOK;
}
}
static int
vmxnet3_get_sset_count(struct net_device *netdev, int sset)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
adapter->num_tx_queues +
(ARRAY_SIZE(vmxnet3_rq_dev_stats) +
ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
adapter->num_rx_queues +
ARRAY_SIZE(vmxnet3_global_stats);
default:
return -EOPNOTSUPP;
}
}
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
* the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
* Therefore, if any registers are added, removed or modified, then a version
* bump and a corresponding change in the vmxnet3 support for ethtool(8)
* --register-dump would be required.
*/
static int
vmxnet3_get_regs_len(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
return ((9 /* BAR1 registers */ +
(1 + adapter->intr.num_intrs) +
(1 + adapter->num_tx_queues * 17 /* Tx queue registers */) +
(1 + adapter->num_rx_queues * 23 /* Rx queue registers */)) *
sizeof(u32));
}
static void
vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strscpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
strscpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void
vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int i, j;
if (stringset != ETH_SS_STATS)
return;
for (j = 0; j < adapter->num_tx_queues; j++) {
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
ethtool_sprintf(&buf, vmxnet3_tq_dev_stats[i].desc);
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
ethtool_sprintf(&buf, vmxnet3_tq_driver_stats[i].desc);
}
for (j = 0; j < adapter->num_rx_queues; j++) {
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
ethtool_sprintf(&buf, vmxnet3_rq_dev_stats[i].desc);
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
ethtool_sprintf(&buf, vmxnet3_rq_driver_stats[i].desc);
}
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
ethtool_sprintf(&buf, vmxnet3_global_stats[i].desc);
}
netdev_features_t vmxnet3_fix_features(struct net_device *netdev,
netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/* If Rx checksum is disabled, then LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
/* If XDP is enabled, then LRO should not be enabled */
if (vmxnet3_xdp_enabled(adapter) && (features & NETIF_F_LRO)) {
netdev_err(netdev, "LRO is not supported with XDP");
features &= ~NETIF_F_LRO;
}
return features;
}
netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
/* Validate if the tunneled packet is being offloaded by the device */
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
u16 port;
struct udphdr *udph;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
l4_proto = ip_hdr(skb)->protocol;
break;
case htons(ETH_P_IPV6):
l4_proto = ipv6_hdr(skb)->nexthdr;
break;
default:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
switch (l4_proto) {
case IPPROTO_UDP:
udph = udp_hdr(skb);
port = be16_to_cpu(udph->dest);
/* Check if offloaded port is supported */
if (port != GENEVE_UDP_PORT &&
port != IANA_VXLAN_UDP_PORT &&
port != VXLAN_UDP_PORT) {
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
break;
default:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
return features;
}
static void vmxnet3_enable_encap_offloads(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (VMXNET3_VERSION_GE_4(adapter)) {
netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO;
if (features & NETIF_F_GSO_UDP_TUNNEL)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_TSO)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_TSO)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
}
if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL;
}
if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
}
}
static void vmxnet3_disable_encap_offloads(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (VMXNET3_VERSION_GE_4(adapter)) {
netdev->hw_enc_features &= ~(NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM);
}
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD |
1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD |
1UL << VMXNET3_CAP_GENEVE_TSO |
1UL << VMXNET3_CAP_VXLAN_TSO |
1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD |
1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
}
int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
netdev_features_t changed = features ^ netdev->features;
netdev_features_t tun_offload_mask = NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
u8 udp_tun_enabled = (netdev->features & tun_offload_mask) != 0;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO |
NETIF_F_HW_VLAN_CTAG_RX | tun_offload_mask)) {
if (features & NETIF_F_RXCSUM)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
/* update hardware LRO capability accordingly */
if (features & NETIF_F_LRO)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXVLAN;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXVLAN;
if ((features & tun_offload_mask) != 0) {
vmxnet3_enable_encap_offloads(netdev, features);
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXINNEROFLD;
} else if ((features & tun_offload_mask) == 0 &&
udp_tun_enabled) {
vmxnet3_disable_encap_offloads(netdev);
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXINNEROFLD;
}
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
return 0;
}
static void
vmxnet3_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *buf)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
u8 *base;
int i;
int j = 0;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
/* this does assume each counter is 64-bit wide */
for (j = 0; j < adapter->num_tx_queues; j++) {
base = (u8 *)&adapter->tqd_start[j].stats;
*buf++ = (u64)j;
for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_tq_dev_stats[i].offset);
base = (u8 *)&adapter->tx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_tq_driver_stats[i].offset);
}
for (j = 0; j < adapter->num_rx_queues; j++) {
base = (u8 *)&adapter->rqd_start[j].stats;
*buf++ = (u64) j;
for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_rq_dev_stats[i].offset);
base = (u8 *)&adapter->rx_queue[j].stats;
for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
*buf++ = *(u64 *)(base +
vmxnet3_rq_driver_stats[i].offset);
}
base = (u8 *)adapter;
for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
*buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset);
}
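/*
 * Illustrative standalone sketch (not driver code): the stats dump above
 * walks descriptor tables that pair a printable name with a byte offset
 * into a stats structure, then reads each counter as a 64-bit value
 * through a base pointer.  The structure and table below are invented for
 * the example; only the access pattern mirrors the driver.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_queue_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t drops;
};

struct demo_stat_desc {
	const char *name;
	size_t offset;
};

static const struct demo_stat_desc demo_stats[] = {
	{ "pkts",  offsetof(struct demo_queue_stats, pkts)  },
	{ "bytes", offsetof(struct demo_queue_stats, bytes) },
	{ "drops", offsetof(struct demo_queue_stats, drops) },
};

int main(void)
{
	struct demo_queue_stats s = { .pkts = 10, .bytes = 1500, .drops = 1 };
	const uint8_t *base = (const uint8_t *)&s;

	/* Same idea as reading *(u64 *)(base + desc[i].offset) above. */
	for (size_t i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		uint64_t val;

		memcpy(&val, base + demo_stats[i].offset, sizeof(val));
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}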
/* This is a version 2 of the vmxnet3 ethtool_regs which goes hand in hand with
* the version 2 of the vmxnet3 support for ethtool(8) --register-dump.
* Therefore, if any registers are added, removed or modified, then a version
* bump and a corresponding change in the vmxnet3 support for ethtool(8)
* --register-dump would be required.
*/
static void
vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 *buf = p;
int i = 0, j = 0;
memset(p, 0, vmxnet3_get_regs_len(netdev));
regs->version = 2;
/* Update vmxnet3_get_regs_len if we want to dump more registers */
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAL);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DSAH);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
buf[j++] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ECR);
buf[j++] = adapter->intr.num_intrs;
for (i = 0; i < adapter->intr.num_intrs; i++) {
buf[j++] = VMXNET3_READ_BAR0_REG(adapter, VMXNET3_REG_IMR
+ i * VMXNET3_REG_ALIGN);
}
buf[j++] = adapter->num_tx_queues;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->tx_prod_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA);
buf[j++] = tq->tx_ring.size;
buf[j++] = tq->tx_ring.next2fill;
buf[j++] = tq->tx_ring.next2comp;
buf[j++] = tq->tx_ring.gen;
buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA);
buf[j++] = tq->data_ring.size;
buf[j++] = tq->txdata_desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA);
buf[j++] = tq->comp_ring.size;
buf[j++] = tq->comp_ring.next2proc;
buf[j++] = tq->comp_ring.gen;
buf[j++] = tq->stopped;
}
buf[j++] = adapter->num_rx_queues;
for (i = 0; i < adapter->num_rx_queues; i++) {
struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_READ_BAR0_REG(adapter, adapter->rx_prod2_offset +
i * VMXNET3_REG_ALIGN);
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA);
buf[j++] = rq->rx_ring[0].size;
buf[j++] = rq->rx_ring[0].next2fill;
buf[j++] = rq->rx_ring[0].next2comp;
buf[j++] = rq->rx_ring[0].gen;
buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA);
buf[j++] = rq->rx_ring[1].size;
buf[j++] = rq->rx_ring[1].next2fill;
buf[j++] = rq->rx_ring[1].next2comp;
buf[j++] = rq->rx_ring[1].gen;
buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA);
buf[j++] = rq->rx_ring[0].size;
buf[j++] = rq->data_ring.desc_size;
buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA);
buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA);
buf[j++] = rq->comp_ring.size;
buf[j++] = rq->comp_ring.next2proc;
buf[j++] = rq->comp_ring.gen;
}
}
static void
vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC;
wol->wolopts = adapter->wol;
}
static int
vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST |
WAKE_MAGICSECURE)) {
return -EOPNOTSUPP;
}
adapter->wol = wol->wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
}
static int
vmxnet3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
ethtool_link_ksettings_zero_link_mode(ecmd, supported);
ethtool_link_ksettings_add_link_mode(ecmd, supported, 10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ecmd, supported, 1000baseT_Full);
ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
ethtool_link_ksettings_add_link_mode(ecmd, advertising, TP);
ecmd->base.port = PORT_TP;
if (adapter->link_speed) {
ecmd->base.speed = adapter->link_speed;
ecmd->base.duplex = DUPLEX_FULL;
} else {
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static void
vmxnet3_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE;
param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE;
param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_RXDATA_DESC_MAX_SIZE : 0;
param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE;
param->rx_pending = adapter->rx_ring_size;
param->tx_pending = adapter->tx_ring_size;
param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ?
adapter->rxdata_desc_size : 0;
param->rx_jumbo_pending = adapter->rx_ring2_size;
}
static int
vmxnet3_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param,
struct kernel_ethtool_ringparam *kernel_param,
struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size;
u16 new_rxdata_desc_size;
u32 sz;
int err = 0;
if (param->tx_pending == 0 || param->tx_pending >
VMXNET3_TX_RING_MAX_SIZE)
return -EINVAL;
if (param->rx_pending == 0 || param->rx_pending >
VMXNET3_RX_RING_MAX_SIZE)
return -EINVAL;
if (param->rx_jumbo_pending == 0 ||
param->rx_jumbo_pending > VMXNET3_RX_RING2_MAX_SIZE)
return -EINVAL;
/* if adapter not yet initialized, do nothing */
if (adapter->rx_buf_per_pkt == 0) {
netdev_err(netdev, "adapter not completely initialized, "
"ring size cannot be changed yet\n");
return -EOPNOTSUPP;
}
if (VMXNET3_VERSION_GE_3(adapter)) {
if (param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE)
return -EINVAL;
} else if (param->rx_mini_pending != 0) {
return -EINVAL;
}
/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
new_tx_ring_size = min_t(u32, new_tx_ring_size,
VMXNET3_TX_RING_MAX_SIZE);
if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size %
VMXNET3_RING_SIZE_ALIGN) != 0)
return -EINVAL;
/* ring0 has to be a multiple of
* rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
*/
sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz;
new_rx_ring_size = min_t(u32, new_rx_ring_size,
VMXNET3_RX_RING_MAX_SIZE / sz * sz);
if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || (new_rx_ring_size %
sz) != 0)
return -EINVAL;
/* ring2 has to be a multiple of VMXNET3_RING_SIZE_ALIGN */
new_rx_ring2_size = (param->rx_jumbo_pending + VMXNET3_RING_SIZE_MASK) &
~VMXNET3_RING_SIZE_MASK;
new_rx_ring2_size = min_t(u32, new_rx_ring2_size,
VMXNET3_RX_RING2_MAX_SIZE);
/* For v7 and later, keep ring size power of 2 for UPT */
if (VMXNET3_VERSION_GE_7(adapter)) {
new_tx_ring_size = rounddown_pow_of_two(new_tx_ring_size);
new_rx_ring_size = rounddown_pow_of_two(new_rx_ring_size);
new_rx_ring2_size = rounddown_pow_of_two(new_rx_ring2_size);
}
/* rx data ring buffer size has to be a multiple of
* VMXNET3_RXDATA_DESC_SIZE_ALIGN
*/
new_rxdata_desc_size =
(param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) &
~VMXNET3_RXDATA_DESC_SIZE_MASK;
new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size,
VMXNET3_RXDATA_DESC_MAX_SIZE);
if (new_tx_ring_size == adapter->tx_ring_size &&
new_rx_ring_size == adapter->rx_ring_size &&
new_rx_ring2_size == adapter->rx_ring2_size &&
new_rxdata_desc_size == adapter->rxdata_desc_size) {
return 0;
}
/*
* Reset_work may be in the middle of resetting the device, wait for its
* completion.
*/
while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
usleep_range(1000, 2000);
if (netif_running(netdev)) {
vmxnet3_quiesce_dev(adapter);
vmxnet3_reset_dev(adapter);
/* recreate the rx queue and the tx queue based on the
* new sizes */
vmxnet3_tq_destroy_all(adapter);
vmxnet3_rq_destroy_all(adapter);
err = vmxnet3_create_queues(adapter, new_tx_ring_size,
new_rx_ring_size, new_rx_ring2_size,
adapter->txdata_desc_size,
new_rxdata_desc_size);
if (err) {
/* failed, most likely because of OOM, try default
* size */
netdev_err(netdev, "failed to apply new sizes, "
"try the default ones\n");
new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
err = vmxnet3_create_queues(adapter,
new_tx_ring_size,
new_rx_ring_size,
new_rx_ring2_size,
adapter->txdata_desc_size,
new_rxdata_desc_size);
if (err) {
netdev_err(netdev, "failed to create queues "
"with default sizes. Closing it\n");
goto out;
}
}
err = vmxnet3_activate_dev(adapter);
if (err)
netdev_err(netdev, "failed to re-activate, error %d."
" Closing it\n", err);
}
adapter->tx_ring_size = new_tx_ring_size;
adapter->rx_ring_size = new_rx_ring_size;
adapter->rx_ring2_size = new_rx_ring2_size;
adapter->rxdata_desc_size = new_rxdata_desc_size;
out:
clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
if (err)
vmxnet3_force_close(adapter);
return err;
}
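/*
 * Illustrative standalone sketch (not driver code): the ring sizing above
 * rounds the requested count up to a multiple of the descriptor alignment,
 * clamps it to the ring maximum, and for newer device versions rounds down
 * to a power of two.  The constants are example stand-ins, not the
 * driver's VMXNET3_RING_SIZE_ALIGN or ring-maximum definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_ALIGN	32u	/* stand-in alignment */
#define DEMO_RING_MAX	4096u	/* stand-in maximum ring size */

static uint32_t demo_rounddown_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p <= v / 2)
		p *= 2;
	return p;
}

static uint32_t demo_ring_size(uint32_t requested, int pow2_required)
{
	uint32_t sz = (requested + DEMO_RING_ALIGN - 1) & ~(DEMO_RING_ALIGN - 1);

	if (sz > DEMO_RING_MAX)
		sz = DEMO_RING_MAX;
	if (pow2_required)
		sz = demo_rounddown_pow2(sz);
	return sz;
}

int main(void)
{
	printf("request 3000 -> %u aligned, %u power-of-two\n",
	       demo_ring_size(3000, 0), demo_ring_size(3000, 1));
	return 0;
}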
static int
vmxnet3_get_rss_hash_opts(struct vmxnet3_adapter *adapter,
struct ethtool_rxnfc *info)
{
enum Vmxnet3_RSSField rss_fields;
if (netif_running(adapter->netdev)) {
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_RSS_FIELDS);
rss_fields = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else {
rss_fields = adapter->rss_fields;
}
info->data = 0;
/* Report default options for RSS on vmxnet3 */
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V4_FLOW:
if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP4)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
if (rss_fields & VMXNET3_RSS_FIELDS_ESPIP4)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
case SCTP_V4_FLOW:
case IPV4_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case UDP_V6_FLOW:
if (rss_fields & VMXNET3_RSS_FIELDS_UDPIP6)
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
if (VMXNET3_VERSION_GE_6(adapter) &&
(rss_fields & VMXNET3_RSS_FIELDS_ESPIP6))
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
case SCTP_V6_FLOW:
case IPV6_FLOW:
info->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
return 0;
}
static int
vmxnet3_set_rss_hash_opt(struct net_device *netdev,
struct vmxnet3_adapter *adapter,
struct ethtool_rxnfc *nfc)
{
enum Vmxnet3_RSSField rss_fields = adapter->rss_fields;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
!(nfc->data & RXH_L4_B_0_1) ||
!(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
case UDP_V4_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP4;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_UDPIP4;
break;
default:
return -EINVAL;
}
break;
case UDP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_UDPIP6;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_UDPIP6;
break;
default:
return -EINVAL;
}
break;
case ESP_V4_FLOW:
case AH_V4_FLOW:
case AH_ESP_V4_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP4;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_ESPIP4;
break;
default:
return -EINVAL;
}
break;
case ESP_V6_FLOW:
case AH_V6_FLOW:
case AH_ESP_V6_FLOW:
if (!VMXNET3_VERSION_GE_6(adapter))
return -EOPNOTSUPP;
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
rss_fields &= ~VMXNET3_RSS_FIELDS_ESPIP6;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
rss_fields |= VMXNET3_RSS_FIELDS_ESPIP6;
break;
default:
return -EINVAL;
}
break;
case SCTP_V4_FLOW:
case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
(nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
default:
return -EINVAL;
}
/* if we changed something we need to update flags */
if (rss_fields != adapter->rss_fields) {
adapter->default_rss_fields = false;
if (netif_running(netdev)) {
struct Vmxnet3_DriverShared *shared = adapter->shared;
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
if (VMXNET3_VERSION_GE_7(adapter)) {
if ((rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_UDP_RSS)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
}
if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_ESP_RSS_IPV4)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
}
if ((rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
VMXNET3_CAP_ESP_RSS_IPV6)) {
adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
} else {
adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
}
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR,
adapter->dev_caps[0]);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_DCR0_REG);
adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter,
VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
spin_lock_irqsave(&adapter->cmd_lock, flags);
cmdInfo->setRssFields = rss_fields;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_RSS_FIELDS);
/* Not all requested RSS may get applied, so get and
* cache what was actually applied.
*/
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_RSS_FIELDS);
adapter->rss_fields =
VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
} else {
/* When the device is activated, we will try to apply
* these rules and cache the applied value later.
*/
adapter->rss_fields = rss_fields;
}
}
return 0;
}
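/*
 * Illustrative standalone sketch (not driver code): the RSS hash-option
 * handler above treats the two L4 hash bits as a unit -- both clear
 * disables the per-flow-type field, both set enables it, and any other
 * combination is rejected.  The bit values and field flag below are
 * example stand-ins rather than the kernel's RXH_L4 bits or the driver's
 * RSS field flags.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_L4_B_0_1	0x1u	/* stand-in for RXH_L4_B_0_1 */
#define DEMO_L4_B_2_3	0x2u	/* stand-in for RXH_L4_B_2_3 */
#define DEMO_FIELD_UDP4	0x10u	/* stand-in for a per-flow RSS field bit */

/* Returns the updated field mask, or UINT32_MAX for an invalid request. */
static uint32_t demo_update_rss_fields(uint32_t fields, uint32_t req)
{
	switch (req & (DEMO_L4_B_0_1 | DEMO_L4_B_2_3)) {
	case 0:
		return fields & ~DEMO_FIELD_UDP4;
	case DEMO_L4_B_0_1 | DEMO_L4_B_2_3:
		return fields | DEMO_FIELD_UDP4;
	default:
		return UINT32_MAX;	/* only "none" or "both" is valid */
	}
}

int main(void)
{
	printf("enable : 0x%x\n",
	       demo_update_rss_fields(0, DEMO_L4_B_0_1 | DEMO_L4_B_2_3));
	printf("disable: 0x%x\n", demo_update_rss_fields(DEMO_FIELD_UDP4, 0));
	printf("invalid: 0x%x\n", demo_update_rss_fields(0, DEMO_L4_B_0_1));
	return 0;
}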
static int
vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = adapter->num_rx_queues;
break;
case ETHTOOL_GRXFH:
if (!VMXNET3_VERSION_GE_4(adapter)) {
err = -EOPNOTSUPP;
break;
}
#ifdef VMXNET3_RSS
if (!adapter->rss) {
err = -EOPNOTSUPP;
break;
}
#endif
err = vmxnet3_get_rss_hash_opts(adapter, info);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
static int
vmxnet3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
if (!VMXNET3_VERSION_GE_4(adapter)) {
err = -EOPNOTSUPP;
goto done;
}
#ifdef VMXNET3_RSS
if (!adapter->rss) {
err = -EOPNOTSUPP;
goto done;
}
#endif
switch (info->cmd) {
case ETHTOOL_SRXFH:
err = vmxnet3_set_rss_hash_opt(netdev, adapter, info);
break;
default:
err = -EOPNOTSUPP;
break;
}
done:
return err;
}
#ifdef VMXNET3_RSS
static u32
vmxnet3_get_rss_indir_size(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
return rssConf->indTableSize;
}
static int
vmxnet3_get_rss(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
unsigned int n = rssConf->indTableSize;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!p)
return 0;
if (n > UPT1_RSS_MAX_IND_TABLE_SIZE)
return 0;
while (n--)
p[n] = rssConf->indTable[n];
return 0;
}
static int
vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key,
const u8 hfunc)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
/* We do not allow change in unsupported parameters */
if (key ||
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
return -EOPNOTSUPP;
if (!p)
return 0;
for (i = 0; i < rssConf->indTableSize; i++)
rssConf->indTable[i] = p[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_RSSIDT);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
return 0;
}
#endif
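/*
 * Illustrative standalone sketch (not driver code): the RSS indirection
 * table written by the handler above simply maps table slots to RX queue
 * indices; a round-robin fill is the usual default layout.  The table and
 * queue counts below are example values, not the UPT1 limits used by the
 * driver.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IND_TABLE_SIZE	32
#define DEMO_NUM_RX_QUEUES	4

int main(void)
{
	uint8_t ind_table[DEMO_IND_TABLE_SIZE];
	int i;

	/* Spread the table entries evenly across the RX queues. */
	for (i = 0; i < DEMO_IND_TABLE_SIZE; i++)
		ind_table[i] = i % DEMO_NUM_RX_QUEUES;

	for (i = 0; i < DEMO_IND_TABLE_SIZE; i++)
		printf("%u%c", (unsigned int)ind_table[i],
		       (i + 1) % 8 ? ' ' : '\n');
	return 0;
}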
static int vmxnet3_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (!VMXNET3_VERSION_GE_3(adapter))
return -EOPNOTSUPP;
switch (adapter->coal_conf->coalMode) {
case VMXNET3_COALESCE_DISABLED:
/* struct ethtool_coalesce is already initialized to 0 */
break;
case VMXNET3_COALESCE_ADAPT:
ec->use_adaptive_rx_coalesce = true;
break;
case VMXNET3_COALESCE_STATIC:
ec->tx_max_coalesced_frames =
adapter->coal_conf->coalPara.coalStatic.tx_comp_depth;
ec->rx_max_coalesced_frames =
adapter->coal_conf->coalPara.coalStatic.rx_depth;
break;
case VMXNET3_COALESCE_RBC: {
u32 rbc_rate;
rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate;
ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate);
}
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int vmxnet3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct Vmxnet3_DriverShared *shared = adapter->shared;
union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
unsigned long flags;
if (!VMXNET3_VERSION_GE_3(adapter))
return -EOPNOTSUPP;
if ((ec->rx_coalesce_usecs == 0) &&
(ec->use_adaptive_rx_coalesce == 0) &&
(ec->tx_max_coalesced_frames == 0) &&
(ec->rx_max_coalesced_frames == 0)) {
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
goto done;
}
if (ec->rx_coalesce_usecs != 0) {
u32 rbc_rate;
if ((ec->use_adaptive_rx_coalesce != 0) ||
(ec->tx_max_coalesced_frames != 0) ||
(ec->rx_max_coalesced_frames != 0)) {
return -EINVAL;
}
rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs);
if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE ||
rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) {
return -EINVAL;
}
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC;
adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate;
goto done;
}
if (ec->use_adaptive_rx_coalesce != 0) {
if (ec->tx_max_coalesced_frames != 0 ||
ec->rx_max_coalesced_frames != 0) {
return -EINVAL;
}
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT;
goto done;
}
if ((ec->tx_max_coalesced_frames != 0) ||
(ec->rx_max_coalesced_frames != 0)) {
if ((ec->tx_max_coalesced_frames >
VMXNET3_COAL_STATIC_MAX_DEPTH) ||
(ec->rx_max_coalesced_frames >
VMXNET3_COAL_STATIC_MAX_DEPTH)) {
return -EINVAL;
}
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC;
adapter->coal_conf->coalPara.coalStatic.tx_comp_depth =
(ec->tx_max_coalesced_frames ?
ec->tx_max_coalesced_frames :
VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
adapter->coal_conf->coalPara.coalStatic.rx_depth =
(ec->rx_max_coalesced_frames ?
ec->rx_max_coalesced_frames :
VMXNET3_COAL_STATIC_DEFAULT_DEPTH);
adapter->coal_conf->coalPara.coalStatic.tx_depth =
VMXNET3_COAL_STATIC_DEFAULT_DEPTH;
goto done;
}
done:
adapter->default_coal_mode = false;
if (netif_running(netdev)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
cmdInfo->varConf.confVer = 1;
cmdInfo->varConf.confLen =
cpu_to_le32(sizeof(*adapter->coal_conf));
cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_SET_COALESCE);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
return 0;
}
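/*
 * Illustrative standalone sketch (not driver code): rate-based coalescing
 * (VMXNET3_COALESCE_RBC) expresses rx_coalesce_usecs as an interrupt rate.
 * The conversion below -- events per second = 1000000 / usecs, and the
 * inverse -- is an assumption made for this example; the driver's
 * VMXNET3_COAL_RBC_RATE() and VMXNET3_COAL_RBC_USECS() macros are the
 * authoritative definitions.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_usecs_to_rate(uint32_t usecs)
{
	return usecs ? 1000000u / usecs : 0;
}

static uint32_t demo_rate_to_usecs(uint32_t rate)
{
	return rate ? 1000000u / rate : 0;
}

int main(void)
{
	uint32_t usecs = 50;
	uint32_t rate = demo_usecs_to_rate(usecs);

	printf("%u usecs ~ %u interrupts/sec ~ %u usecs\n",
	       usecs, rate, demo_rate_to_usecs(rate));
	return 0;
}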
static void vmxnet3_get_channels(struct net_device *netdev,
struct ethtool_channels *ec)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
if (IS_ENABLED(CONFIG_PCI_MSI) && adapter->intr.type == VMXNET3_IT_MSIX) {
if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
ec->combined_count = adapter->num_tx_queues;
} else {
ec->rx_count = adapter->num_rx_queues;
ec->tx_count =
adapter->share_intr == VMXNET3_INTR_TXSHARE ?
1 : adapter->num_tx_queues;
}
} else {
ec->combined_count = 1;
}
ec->other_count = 1;
/* Number of interrupts cannot be changed on the fly */
/* Just set maximums to actual values */
ec->max_rx = ec->rx_count;
ec->max_tx = ec->tx_count;
ec->max_combined = ec->combined_count;
ec->max_other = ec->other_count;
}
static const struct ethtool_ops vmxnet3_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
.get_regs = vmxnet3_get_regs,
.get_wol = vmxnet3_get_wol,
.set_wol = vmxnet3_set_wol,
.get_link = ethtool_op_get_link,
.get_coalesce = vmxnet3_get_coalesce,
.set_coalesce = vmxnet3_set_coalesce,
.get_strings = vmxnet3_get_strings,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
.set_rxnfc = vmxnet3_set_rxnfc,
#ifdef VMXNET3_RSS
.get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh = vmxnet3_get_rss,
.set_rxfh = vmxnet3_set_rss,
#endif
.get_link_ksettings = vmxnet3_get_link_ksettings,
.get_channels = vmxnet3_get_channels,
};
void vmxnet3_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &vmxnet3_ethtool_ops;
}
|
linux-master
|
drivers/net/vmxnet3/vmxnet3_ethtool.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Mellanox Technologies. All rights reserved */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/inet.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/devlink.h>
#include <net/ip.h>
#include <net/psample.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/udp.h>
#include "netdevsim.h"
#define NSIM_PSAMPLE_REPORT_INTERVAL_MS 100
#define NSIM_PSAMPLE_INVALID_TC 0xFFFF
#define NSIM_PSAMPLE_L4_DATA_LEN 100
struct nsim_dev_psample {
struct delayed_work psample_dw;
struct dentry *ddir;
struct psample_group *group;
u32 rate;
u32 group_num;
u32 trunc_size;
int in_ifindex;
int out_ifindex;
u16 out_tc;
u64 out_tc_occ_max;
u64 latency_max;
bool is_active;
};
static struct sk_buff *nsim_dev_psample_skb_build(void)
{
int tot_len, data_len = NSIM_PSAMPLE_L4_DATA_LEN;
struct sk_buff *skb;
struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return NULL;
tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
skb_reset_mac_header(skb);
eth = skb_put(skb, sizeof(struct ethhdr));
eth_random_addr(eth->h_dest);
eth_random_addr(eth->h_source);
eth->h_proto = htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb_set_network_header(skb, skb->len);
iph = skb_put(skb, sizeof(struct iphdr));
iph->protocol = IPPROTO_UDP;
iph->saddr = in_aton("192.0.2.1");
iph->daddr = in_aton("198.51.100.1");
iph->version = 0x4;
iph->frag_off = 0;
iph->ihl = 0x5;
iph->tot_len = htons(tot_len);
iph->id = 0;
iph->ttl = 100;
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
skb_set_transport_header(skb, skb->len);
udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
get_random_bytes(&udph->source, sizeof(u16));
get_random_bytes(&udph->dest, sizeof(u16));
udph->len = htons(sizeof(struct udphdr) + data_len);
return skb;
}
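/*
 * Illustrative standalone sketch (not kernel code): the synthetic packet
 * above finishes its IPv4 header with ip_fast_csum().  The underlying
 * algorithm is the standard Internet checksum: sum the header as 16-bit
 * words, fold the carries, and take the one's complement.  This is a
 * generic byte-wise reimplementation for illustration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * len is the header length in bytes; the kernel helper instead takes the
 * length in 32-bit words (iph->ihl).
 */
static uint16_t demo_ipv4_checksum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)hdr[i] << 8) | hdr[i + 1];
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header with the checksum field (bytes 10-11) zeroed. */
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0x00, 0x02, 0x01,
		0xc6, 0x33, 0x64, 0x01,
	};
	uint16_t csum = demo_ipv4_checksum(iph, sizeof(iph));

	iph[10] = csum >> 8;	/* store back in network byte order */
	iph[11] = csum & 0xff;
	printf("checksum = 0x%04x\n", csum);
	return 0;
}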
static void nsim_dev_psample_md_prepare(const struct nsim_dev_psample *psample,
struct psample_metadata *md,
unsigned int len)
{
md->trunc_size = psample->trunc_size ? psample->trunc_size : len;
md->in_ifindex = psample->in_ifindex;
md->out_ifindex = psample->out_ifindex;
if (psample->out_tc != NSIM_PSAMPLE_INVALID_TC) {
md->out_tc = psample->out_tc;
md->out_tc_valid = 1;
}
if (psample->out_tc_occ_max) {
u64 out_tc_occ;
get_random_bytes(&out_tc_occ, sizeof(u64));
md->out_tc_occ = out_tc_occ & (psample->out_tc_occ_max - 1);
md->out_tc_occ_valid = 1;
}
if (psample->latency_max) {
u64 latency;
get_random_bytes(&latency, sizeof(u64));
md->latency = latency & (psample->latency_max - 1);
md->latency_valid = 1;
}
}
static void nsim_dev_psample_report_work(struct work_struct *work)
{
struct nsim_dev_psample *psample;
struct psample_metadata md = {};
struct sk_buff *skb;
unsigned long delay;
psample = container_of(work, struct nsim_dev_psample, psample_dw.work);
skb = nsim_dev_psample_skb_build();
if (!skb)
goto out;
nsim_dev_psample_md_prepare(psample, &md, skb->len);
psample_sample_packet(psample->group, skb, psample->rate, &md);
consume_skb(skb);
out:
delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS);
schedule_delayed_work(&psample->psample_dw, delay);
}
static int nsim_dev_psample_enable(struct nsim_dev *nsim_dev)
{
struct nsim_dev_psample *psample = nsim_dev->psample;
struct devlink *devlink;
unsigned long delay;
if (psample->is_active)
return -EBUSY;
devlink = priv_to_devlink(nsim_dev);
psample->group = psample_group_get(devlink_net(devlink),
psample->group_num);
if (!psample->group)
return -EINVAL;
delay = msecs_to_jiffies(NSIM_PSAMPLE_REPORT_INTERVAL_MS);
schedule_delayed_work(&psample->psample_dw, delay);
psample->is_active = true;
return 0;
}
static int nsim_dev_psample_disable(struct nsim_dev *nsim_dev)
{
struct nsim_dev_psample *psample = nsim_dev->psample;
if (!psample->is_active)
return -EINVAL;
psample->is_active = false;
cancel_delayed_work_sync(&psample->psample_dw);
psample_group_put(psample->group);
return 0;
}
static ssize_t nsim_dev_psample_enable_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
bool enable;
int err;
err = kstrtobool_from_user(data, count, &enable);
if (err)
return err;
if (enable)
err = nsim_dev_psample_enable(nsim_dev);
else
err = nsim_dev_psample_disable(nsim_dev);
return err ? err : count;
}
static const struct file_operations nsim_psample_enable_fops = {
.open = simple_open,
.write = nsim_dev_psample_enable_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
int nsim_dev_psample_init(struct nsim_dev *nsim_dev)
{
struct nsim_dev_psample *psample;
int err;
psample = kzalloc(sizeof(*psample), GFP_KERNEL);
if (!psample)
return -ENOMEM;
nsim_dev->psample = psample;
INIT_DELAYED_WORK(&psample->psample_dw, nsim_dev_psample_report_work);
psample->ddir = debugfs_create_dir("psample", nsim_dev->ddir);
if (IS_ERR(psample->ddir)) {
err = PTR_ERR(psample->ddir);
goto err_psample_free;
}
/* Populate sampling parameters with sane defaults. */
psample->rate = 100;
debugfs_create_u32("rate", 0600, psample->ddir, &psample->rate);
psample->group_num = 10;
debugfs_create_u32("group_num", 0600, psample->ddir,
&psample->group_num);
psample->trunc_size = 0;
debugfs_create_u32("trunc_size", 0600, psample->ddir,
&psample->trunc_size);
psample->in_ifindex = 1;
debugfs_create_u32("in_ifindex", 0600, psample->ddir,
&psample->in_ifindex);
psample->out_ifindex = 2;
debugfs_create_u32("out_ifindex", 0600, psample->ddir,
&psample->out_ifindex);
psample->out_tc = 0;
debugfs_create_u16("out_tc", 0600, psample->ddir, &psample->out_tc);
psample->out_tc_occ_max = 10000;
debugfs_create_u64("out_tc_occ_max", 0600, psample->ddir,
&psample->out_tc_occ_max);
psample->latency_max = 50;
debugfs_create_u64("latency_max", 0600, psample->ddir,
&psample->latency_max);
debugfs_create_file("enable", 0200, psample->ddir, nsim_dev,
&nsim_psample_enable_fops);
return 0;
err_psample_free:
kfree(nsim_dev->psample);
return err;
}
void nsim_dev_psample_exit(struct nsim_dev *nsim_dev)
{
debugfs_remove_recursive(nsim_dev->psample->ddir);
if (nsim_dev->psample->is_active) {
cancel_delayed_work_sync(&nsim_dev->psample->psample_dw);
psample_group_put(nsim_dev->psample->group);
}
kfree(nsim_dev->psample);
}
|
linux-master
|
drivers/net/netdevsim/psample.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "netdevsim.h"
static int
nsim_dev_empty_reporter_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
{
return 0;
}
static int
nsim_dev_empty_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
{
return 0;
}
static const
struct devlink_health_reporter_ops nsim_dev_empty_reporter_ops = {
.name = "empty",
.dump = nsim_dev_empty_reporter_dump,
.diagnose = nsim_dev_empty_reporter_diagnose,
};
struct nsim_dev_dummy_reporter_ctx {
char *break_msg;
};
static int
nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter,
void *priv_ctx,
struct netlink_ext_ack *extack)
{
struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
if (health->fail_recover) {
/* For testing purposes, user set debugfs fail_recover
* value to true. Fail right away.
*/
		NL_SET_ERR_MSG_MOD(extack, "User set up the recover to fail for testing purposes");
return -EINVAL;
}
if (ctx) {
kfree(health->recovered_break_msg);
health->recovered_break_msg = kstrdup(ctx->break_msg,
GFP_KERNEL);
if (!health->recovered_break_msg)
return -ENOMEM;
}
return 0;
}
static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
{
char *binary;
int err;
int i;
err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true);
if (err)
return err;
err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3);
if (err)
return err;
err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4);
if (err)
return err;
err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring");
if (err)
return err;
binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
if (!binary)
return -ENOMEM;
get_random_bytes(binary, binary_len);
err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len);
kfree(binary);
if (err)
return err;
err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
if (err)
return err;
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false);
if (err)
return err;
err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false);
if (err)
return err;
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
err = devlink_fmsg_pair_nest_end(fmsg);
if (err)
return err;
	err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
	if (err)
		return err;
	for (i = 0; i < 10; i++) {
		err = devlink_fmsg_bool_put(fmsg, true);
		if (err)
			return err;
	}
	err = devlink_fmsg_arr_pair_nest_end(fmsg);
	if (err)
		return err;
err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array");
if (err)
return err;
for (i = 0; i < 10; i++) {
err = devlink_fmsg_u32_put(fmsg, i);
if (err)
return err;
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
if (err)
return err;
for (i = 0; i < 10; i++) {
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_bool_pair_put(fmsg,
"in_array_nested_test_bool",
false);
if (err)
return err;
err = devlink_fmsg_u8_pair_put(fmsg,
"in_array_nested_test_u8",
i);
if (err)
return err;
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
}
return devlink_fmsg_arr_pair_nest_end(fmsg);
}
static int
nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg, void *priv_ctx,
struct netlink_ext_ack *extack)
{
struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
int err;
if (ctx) {
err = devlink_fmsg_string_pair_put(fmsg, "break_message",
ctx->break_msg);
if (err)
return err;
}
return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
}
static int
nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
{
struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
int err;
if (health->recovered_break_msg) {
err = devlink_fmsg_string_pair_put(fmsg,
"recovered_break_message",
health->recovered_break_msg);
if (err)
return err;
}
return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
}
static const
struct devlink_health_reporter_ops nsim_dev_dummy_reporter_ops = {
.name = "dummy",
.recover = nsim_dev_dummy_reporter_recover,
.dump = nsim_dev_dummy_reporter_dump,
.diagnose = nsim_dev_dummy_reporter_diagnose,
};
static ssize_t nsim_dev_health_break_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev_health *health = file->private_data;
struct nsim_dev_dummy_reporter_ctx ctx;
char *break_msg;
int err;
break_msg = memdup_user_nul(data, count);
if (IS_ERR(break_msg))
return PTR_ERR(break_msg);
if (break_msg[count - 1] == '\n')
break_msg[count - 1] = '\0';
ctx.break_msg = break_msg;
err = devlink_health_report(health->dummy_reporter, break_msg, &ctx);
if (err)
goto out;
out:
kfree(break_msg);
return err ?: count;
}
static const struct file_operations nsim_dev_health_break_fops = {
.open = simple_open,
.write = nsim_dev_health_break_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
{
struct nsim_dev_health *health = &nsim_dev->health;
int err;
health->empty_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_empty_reporter_ops,
0, health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
devl_health_reporter_create(devlink,
&nsim_dev_dummy_reporter_ops,
0, health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
}
health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
if (IS_ERR(health->ddir)) {
err = PTR_ERR(health->ddir);
goto err_dummy_reporter_destroy;
}
health->recovered_break_msg = NULL;
debugfs_create_file("break_health", 0200, health->ddir, health,
&nsim_dev_health_break_fops);
health->binary_len = 16;
debugfs_create_u32("binary_len", 0600, health->ddir,
&health->binary_len);
health->fail_recover = false;
debugfs_create_bool("fail_recover", 0600, health->ddir,
&health->fail_recover);
return 0;
err_dummy_reporter_destroy:
devl_health_reporter_destroy(health->dummy_reporter);
err_empty_reporter_destroy:
devl_health_reporter_destroy(health->empty_reporter);
return err;
}
void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
{
struct nsim_dev_health *health = &nsim_dev->health;
debugfs_remove_recursive(health->ddir);
kfree(health->recovered_break_msg);
devl_health_reporter_destroy(health->dummy_reporter);
devl_health_reporter_destroy(health->empty_reporter);
}
|
linux-master
|
drivers/net/netdevsim/health.c
|
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include "netdevsim.h"
#define NSIM_DEV_HWSTATS_TRAFFIC_MS 100
static struct list_head *
nsim_dev_hwstats_get_list_head(struct nsim_dev_hwstats *hwstats,
enum netdev_offload_xstats_type type)
{
switch (type) {
case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
return &hwstats->l3_list;
}
WARN_ON_ONCE(1);
return NULL;
}
static void nsim_dev_hwstats_traffic_bump(struct nsim_dev_hwstats *hwstats,
enum netdev_offload_xstats_type type)
{
struct nsim_dev_hwstats_netdev *hwsdev;
struct list_head *hwsdev_list;
hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
if (WARN_ON(!hwsdev_list))
return;
list_for_each_entry(hwsdev, hwsdev_list, list) {
if (hwsdev->enabled) {
hwsdev->stats.rx_packets += 1;
hwsdev->stats.tx_packets += 2;
hwsdev->stats.rx_bytes += 100;
hwsdev->stats.tx_bytes += 300;
}
}
}
static void nsim_dev_hwstats_traffic_work(struct work_struct *work)
{
struct nsim_dev_hwstats *hwstats;
hwstats = container_of(work, struct nsim_dev_hwstats, traffic_dw.work);
mutex_lock(&hwstats->hwsdev_list_lock);
nsim_dev_hwstats_traffic_bump(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
mutex_unlock(&hwstats->hwsdev_list_lock);
schedule_delayed_work(&hwstats->traffic_dw,
msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
}
static struct nsim_dev_hwstats_netdev *
nsim_dev_hwslist_find_hwsdev(struct list_head *hwsdev_list,
int ifindex)
{
struct nsim_dev_hwstats_netdev *hwsdev;
list_for_each_entry(hwsdev, hwsdev_list, list) {
if (hwsdev->netdev->ifindex == ifindex)
return hwsdev;
}
return NULL;
}
static int nsim_dev_hwsdev_enable(struct nsim_dev_hwstats_netdev *hwsdev,
struct netlink_ext_ack *extack)
{
if (hwsdev->fail_enable) {
hwsdev->fail_enable = false;
NL_SET_ERR_MSG_MOD(extack, "Stats enablement set to fail");
return -ECANCELED;
}
hwsdev->enabled = true;
return 0;
}
static void nsim_dev_hwsdev_disable(struct nsim_dev_hwstats_netdev *hwsdev)
{
hwsdev->enabled = false;
memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
}
static int
nsim_dev_hwsdev_report_delta(struct nsim_dev_hwstats_netdev *hwsdev,
struct netdev_notifier_offload_xstats_info *info)
{
netdev_offload_xstats_report_delta(info->report_delta, &hwsdev->stats);
memset(&hwsdev->stats, 0, sizeof(hwsdev->stats));
return 0;
}
static void
nsim_dev_hwsdev_report_used(struct nsim_dev_hwstats_netdev *hwsdev,
struct netdev_notifier_offload_xstats_info *info)
{
if (hwsdev->enabled)
netdev_offload_xstats_report_used(info->report_used);
}
static int nsim_dev_hwstats_event_off_xstats(struct nsim_dev_hwstats *hwstats,
struct net_device *dev,
unsigned long event, void *ptr)
{
struct netdev_notifier_offload_xstats_info *info;
struct nsim_dev_hwstats_netdev *hwsdev;
struct list_head *hwsdev_list;
int err = 0;
info = ptr;
hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, info->type);
if (!hwsdev_list)
return 0;
mutex_lock(&hwstats->hwsdev_list_lock);
hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
if (!hwsdev)
goto out;
switch (event) {
case NETDEV_OFFLOAD_XSTATS_ENABLE:
err = nsim_dev_hwsdev_enable(hwsdev, info->info.extack);
break;
case NETDEV_OFFLOAD_XSTATS_DISABLE:
nsim_dev_hwsdev_disable(hwsdev);
break;
case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
nsim_dev_hwsdev_report_used(hwsdev, info);
break;
case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
err = nsim_dev_hwsdev_report_delta(hwsdev, info);
break;
}
out:
mutex_unlock(&hwstats->hwsdev_list_lock);
return err;
}
static void nsim_dev_hwsdev_fini(struct nsim_dev_hwstats_netdev *hwsdev)
{
dev_put(hwsdev->netdev);
kfree(hwsdev);
}
static void
__nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
struct net_device *dev,
enum netdev_offload_xstats_type type)
{
struct nsim_dev_hwstats_netdev *hwsdev;
struct list_head *hwsdev_list;
hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
if (WARN_ON(!hwsdev_list))
return;
hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, dev->ifindex);
if (!hwsdev)
return;
list_del(&hwsdev->list);
nsim_dev_hwsdev_fini(hwsdev);
}
static void nsim_dev_hwstats_event_unregister(struct nsim_dev_hwstats *hwstats,
struct net_device *dev)
{
mutex_lock(&hwstats->hwsdev_list_lock);
__nsim_dev_hwstats_event_unregister(hwstats, dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3);
mutex_unlock(&hwstats->hwsdev_list_lock);
}
static int nsim_dev_hwstats_event(struct nsim_dev_hwstats *hwstats,
struct net_device *dev,
unsigned long event, void *ptr)
{
switch (event) {
case NETDEV_OFFLOAD_XSTATS_ENABLE:
case NETDEV_OFFLOAD_XSTATS_DISABLE:
case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
return nsim_dev_hwstats_event_off_xstats(hwstats, dev,
event, ptr);
case NETDEV_UNREGISTER:
nsim_dev_hwstats_event_unregister(hwstats, dev);
break;
}
return 0;
}
static int nsim_dev_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct nsim_dev_hwstats *hwstats;
int err = 0;
hwstats = container_of(nb, struct nsim_dev_hwstats, netdevice_nb);
err = nsim_dev_hwstats_event(hwstats, dev, event, ptr);
if (err)
return notifier_from_errno(err);
return NOTIFY_OK;
}
static int
nsim_dev_hwstats_enable_ifindex(struct nsim_dev_hwstats *hwstats,
int ifindex,
enum netdev_offload_xstats_type type,
struct list_head *hwsdev_list)
{
struct nsim_dev_hwstats_netdev *hwsdev;
struct nsim_dev *nsim_dev;
struct net_device *netdev;
bool notify = false;
struct net *net;
int err = 0;
nsim_dev = container_of(hwstats, struct nsim_dev, hwstats);
net = nsim_dev_net(nsim_dev);
rtnl_lock();
mutex_lock(&hwstats->hwsdev_list_lock);
hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
if (hwsdev)
goto out_unlock_list;
netdev = dev_get_by_index(net, ifindex);
if (!netdev) {
err = -ENODEV;
goto out_unlock_list;
}
hwsdev = kzalloc(sizeof(*hwsdev), GFP_KERNEL);
if (!hwsdev) {
err = -ENOMEM;
goto out_put_netdev;
}
hwsdev->netdev = netdev;
list_add_tail(&hwsdev->list, hwsdev_list);
mutex_unlock(&hwstats->hwsdev_list_lock);
if (netdev_offload_xstats_enabled(netdev, type)) {
nsim_dev_hwsdev_enable(hwsdev, NULL);
notify = true;
}
if (notify)
rtnl_offload_xstats_notify(netdev);
rtnl_unlock();
return err;
out_put_netdev:
dev_put(netdev);
out_unlock_list:
mutex_unlock(&hwstats->hwsdev_list_lock);
rtnl_unlock();
return err;
}
static int
nsim_dev_hwstats_disable_ifindex(struct nsim_dev_hwstats *hwstats,
int ifindex,
enum netdev_offload_xstats_type type,
struct list_head *hwsdev_list)
{
struct nsim_dev_hwstats_netdev *hwsdev;
int err = 0;
rtnl_lock();
mutex_lock(&hwstats->hwsdev_list_lock);
hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
if (hwsdev)
list_del(&hwsdev->list);
mutex_unlock(&hwstats->hwsdev_list_lock);
if (!hwsdev) {
err = -ENOENT;
goto unlock_out;
}
if (netdev_offload_xstats_enabled(hwsdev->netdev, type)) {
netdev_offload_xstats_push_delta(hwsdev->netdev, type,
&hwsdev->stats);
rtnl_offload_xstats_notify(hwsdev->netdev);
}
nsim_dev_hwsdev_fini(hwsdev);
unlock_out:
rtnl_unlock();
return err;
}
static int
nsim_dev_hwstats_fail_ifindex(struct nsim_dev_hwstats *hwstats,
int ifindex,
enum netdev_offload_xstats_type type,
struct list_head *hwsdev_list)
{
struct nsim_dev_hwstats_netdev *hwsdev;
int err = 0;
mutex_lock(&hwstats->hwsdev_list_lock);
hwsdev = nsim_dev_hwslist_find_hwsdev(hwsdev_list, ifindex);
if (!hwsdev) {
err = -ENOENT;
goto err_hwsdev_list_unlock;
}
hwsdev->fail_enable = true;
err_hwsdev_list_unlock:
mutex_unlock(&hwstats->hwsdev_list_lock);
return err;
}
enum nsim_dev_hwstats_do {
NSIM_DEV_HWSTATS_DO_DISABLE,
NSIM_DEV_HWSTATS_DO_ENABLE,
NSIM_DEV_HWSTATS_DO_FAIL,
};
struct nsim_dev_hwstats_fops {
const struct file_operations fops;
enum nsim_dev_hwstats_do action;
enum netdev_offload_xstats_type type;
};
static ssize_t
nsim_dev_hwstats_do_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev_hwstats *hwstats = file->private_data;
struct nsim_dev_hwstats_fops *hwsfops;
struct list_head *hwsdev_list;
int ifindex;
int err;
hwsfops = container_of(debugfs_real_fops(file),
struct nsim_dev_hwstats_fops, fops);
err = kstrtoint_from_user(data, count, 0, &ifindex);
if (err)
return err;
hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, hwsfops->type);
if (WARN_ON(!hwsdev_list))
return -EINVAL;
switch (hwsfops->action) {
case NSIM_DEV_HWSTATS_DO_DISABLE:
err = nsim_dev_hwstats_disable_ifindex(hwstats, ifindex,
hwsfops->type,
hwsdev_list);
break;
case NSIM_DEV_HWSTATS_DO_ENABLE:
err = nsim_dev_hwstats_enable_ifindex(hwstats, ifindex,
hwsfops->type,
hwsdev_list);
break;
case NSIM_DEV_HWSTATS_DO_FAIL:
err = nsim_dev_hwstats_fail_ifindex(hwstats, ifindex,
hwsfops->type,
hwsdev_list);
break;
}
if (err)
return err;
return count;
}
#define NSIM_DEV_HWSTATS_FOPS(ACTION, TYPE) \
{ \
.fops = { \
.open = simple_open, \
.write = nsim_dev_hwstats_do_write, \
.llseek = generic_file_llseek, \
.owner = THIS_MODULE, \
}, \
.action = ACTION, \
.type = TYPE, \
}
static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_disable_fops =
NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_DISABLE,
NETDEV_OFFLOAD_XSTATS_TYPE_L3);
static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_enable_fops =
NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_ENABLE,
NETDEV_OFFLOAD_XSTATS_TYPE_L3);
static const struct nsim_dev_hwstats_fops nsim_dev_hwstats_l3_fail_fops =
NSIM_DEV_HWSTATS_FOPS(NSIM_DEV_HWSTATS_DO_FAIL,
NETDEV_OFFLOAD_XSTATS_TYPE_L3);
#undef NSIM_DEV_HWSTATS_FOPS
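/*
 * Illustrative standalone sketch (not kernel code): the write handler above
 * recovers its configuration with container_of(debugfs_real_fops(file), ...),
 * i.e. it steps from a pointer to an embedded member back to the enclosing
 * structure.  The macro below reproduces that pointer arithmetic in
 * userspace with invented demo types.
 */
#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_fops {
	int dummy;
};

struct demo_hwstats_fops {
	struct demo_fops fops;	/* embedded member, like the real fops */
	int action;
	int type;
};

int main(void)
{
	struct demo_hwstats_fops hws = { .action = 2, .type = 3 };
	struct demo_fops *inner = &hws.fops;
	struct demo_hwstats_fops *outer =
		demo_container_of(inner, struct demo_hwstats_fops, fops);

	printf("action=%d type=%d\n", outer->action, outer->type);
	return 0;
}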
int nsim_dev_hwstats_init(struct nsim_dev *nsim_dev)
{
struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
struct net *net = nsim_dev_net(nsim_dev);
int err;
mutex_init(&hwstats->hwsdev_list_lock);
INIT_LIST_HEAD(&hwstats->l3_list);
hwstats->netdevice_nb.notifier_call = nsim_dev_netdevice_event;
err = register_netdevice_notifier_net(net, &hwstats->netdevice_nb);
if (err)
goto err_mutex_destroy;
hwstats->ddir = debugfs_create_dir("hwstats", nsim_dev->ddir);
if (IS_ERR(hwstats->ddir)) {
err = PTR_ERR(hwstats->ddir);
goto err_unregister_notifier;
}
hwstats->l3_ddir = debugfs_create_dir("l3", hwstats->ddir);
if (IS_ERR(hwstats->l3_ddir)) {
err = PTR_ERR(hwstats->l3_ddir);
goto err_remove_hwstats_recursive;
}
debugfs_create_file("enable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_enable_fops.fops);
debugfs_create_file("disable_ifindex", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_disable_fops.fops);
debugfs_create_file("fail_next_enable", 0200, hwstats->l3_ddir, hwstats,
&nsim_dev_hwstats_l3_fail_fops.fops);
INIT_DELAYED_WORK(&hwstats->traffic_dw,
&nsim_dev_hwstats_traffic_work);
schedule_delayed_work(&hwstats->traffic_dw,
msecs_to_jiffies(NSIM_DEV_HWSTATS_TRAFFIC_MS));
return 0;
err_remove_hwstats_recursive:
debugfs_remove_recursive(hwstats->ddir);
err_unregister_notifier:
unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
err_mutex_destroy:
mutex_destroy(&hwstats->hwsdev_list_lock);
return err;
}
static void nsim_dev_hwsdev_list_wipe(struct nsim_dev_hwstats *hwstats,
enum netdev_offload_xstats_type type)
{
struct nsim_dev_hwstats_netdev *hwsdev, *tmp;
struct list_head *hwsdev_list;
hwsdev_list = nsim_dev_hwstats_get_list_head(hwstats, type);
if (WARN_ON(!hwsdev_list))
return;
mutex_lock(&hwstats->hwsdev_list_lock);
list_for_each_entry_safe(hwsdev, tmp, hwsdev_list, list) {
list_del(&hwsdev->list);
nsim_dev_hwsdev_fini(hwsdev);
}
mutex_unlock(&hwstats->hwsdev_list_lock);
}
void nsim_dev_hwstats_exit(struct nsim_dev *nsim_dev)
{
struct nsim_dev_hwstats *hwstats = &nsim_dev->hwstats;
struct net *net = nsim_dev_net(nsim_dev);
cancel_delayed_work_sync(&hwstats->traffic_dw);
debugfs_remove_recursive(hwstats->ddir);
unregister_netdevice_notifier_net(net, &hwstats->netdevice_nb);
nsim_dev_hwsdev_list_wipe(hwstats, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
mutex_destroy(&hwstats->hwsdev_list_lock);
}
|
linux-master
|
drivers/net/netdevsim/hwstats.c
|
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/random.h>
#include "netdevsim.h"
static void
nsim_get_pause_stats(struct net_device *dev,
struct ethtool_pause_stats *pause_stats)
{
struct netdevsim *ns = netdev_priv(dev);
if (ns->ethtool.pauseparam.report_stats_rx)
pause_stats->rx_pause_frames = 1;
if (ns->ethtool.pauseparam.report_stats_tx)
pause_stats->tx_pause_frames = 2;
}
static void
nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
struct netdevsim *ns = netdev_priv(dev);
pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
pause->rx_pause = ns->ethtool.pauseparam.rx;
pause->tx_pause = ns->ethtool.pauseparam.tx;
}
static int
nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
struct netdevsim *ns = netdev_priv(dev);
if (pause->autoneg)
return -EINVAL;
ns->ethtool.pauseparam.rx = pause->rx_pause;
ns->ethtool.pauseparam.tx = pause->tx_pause;
return 0;
}
static int nsim_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
memcpy(coal, &ns->ethtool.coalesce, sizeof(ns->ethtool.coalesce));
return 0;
}
static int nsim_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
memcpy(&ns->ethtool.coalesce, coal, sizeof(ns->ethtool.coalesce));
return 0;
}
static void nsim_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
}
static int nsim_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
struct netlink_ext_ack *extack)
{
struct netdevsim *ns = netdev_priv(dev);
ns->ethtool.ring.rx_pending = ring->rx_pending;
ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending;
ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending;
ns->ethtool.ring.tx_pending = ring->tx_pending;
return 0;
}
static void
nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
{
struct netdevsim *ns = netdev_priv(dev);
ch->max_combined = ns->nsim_bus_dev->num_queues;
ch->combined_count = ns->ethtool.channels;
}
static int
nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
struct netdevsim *ns = netdev_priv(dev);
int err;
err = netif_set_real_num_queues(dev, ch->combined_count,
ch->combined_count);
if (err)
return err;
ns->ethtool.channels = ch->combined_count;
return 0;
}
static int
nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
{
struct netdevsim *ns = netdev_priv(dev);
if (ns->ethtool.get_err)
return -ns->ethtool.get_err;
memcpy(fecparam, &ns->ethtool.fec, sizeof(ns->ethtool.fec));
return 0;
}
static int
nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
{
struct netdevsim *ns = netdev_priv(dev);
u32 fec;
if (ns->ethtool.set_err)
return -ns->ethtool.set_err;
memcpy(&ns->ethtool.fec, fecparam, sizeof(ns->ethtool.fec));
fec = fecparam->fec;
if (fec == ETHTOOL_FEC_AUTO)
fec |= ETHTOOL_FEC_OFF;
fec |= ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = 1 << (fls(fec) - 1);
return 0;
}
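/*
 * Illustrative standalone sketch (not kernel code): active_fec above is
 * reduced to a single mode by taking the highest set bit of the requested
 * mask, 1 << (fls(fec) - 1).  fls() returns the 1-based index of the most
 * significant set bit (0 for no bits); a portable equivalent is shown
 * below.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_fls(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;	/* 0 when no bit is set, matching the kernel's fls() */
}

int main(void)
{
	uint32_t fec = 0x1u | 0x8u;	/* example request with two modes set */
	uint32_t active = fec ? 1u << (demo_fls(fec) - 1) : 0;

	printf("requested 0x%x -> active 0x%x\n", fec, active);
	return 0;
}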
static int nsim_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
struct netdevsim *ns = netdev_priv(dev);
info->phc_index = mock_phc_index(ns->phc);
return 0;
}
static const struct ethtool_ops nsim_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
.get_pause_stats = nsim_get_pause_stats,
.get_pauseparam = nsim_get_pauseparam,
.set_pauseparam = nsim_set_pauseparam,
.set_coalesce = nsim_set_coalesce,
.get_coalesce = nsim_get_coalesce,
.get_ringparam = nsim_get_ringparam,
.set_ringparam = nsim_set_ringparam,
.get_channels = nsim_get_channels,
.set_channels = nsim_set_channels,
.get_fecparam = nsim_get_fecparam,
.set_fecparam = nsim_set_fecparam,
.get_ts_info = nsim_get_ts_info,
};
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
ns->ethtool.ring.tx_max_pending = 4096;
}
void nsim_ethtool_init(struct netdevsim *ns)
{
struct dentry *ethtool, *dir;
ns->netdev->ethtool_ops = &nsim_ethtool_ops;
nsim_ethtool_ring_init(ns);
ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
ns->ethtool.channels = ns->nsim_bus_dev->num_queues;
ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err);
debugfs_create_u32("set_err", 0600, ethtool, &ns->ethtool.set_err);
dir = debugfs_create_dir("pause", ethtool);
debugfs_create_bool("report_stats_rx", 0600, dir,
&ns->ethtool.pauseparam.report_stats_rx);
debugfs_create_bool("report_stats_tx", 0600, dir,
&ns->ethtool.pauseparam.report_stats_tx);
dir = debugfs_create_dir("ring", ethtool);
debugfs_create_u32("rx_max_pending", 0600, dir,
&ns->ethtool.ring.rx_max_pending);
debugfs_create_u32("rx_jumbo_max_pending", 0600, dir,
&ns->ethtool.ring.rx_jumbo_max_pending);
debugfs_create_u32("rx_mini_max_pending", 0600, dir,
&ns->ethtool.ring.rx_mini_max_pending);
debugfs_create_u32("tx_max_pending", 0600, dir,
&ns->ethtool.ring.tx_max_pending);
}
|
linux-master
|
drivers/net/netdevsim/ethtool.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
#include <crypto/aead.h>
#include <linux/debugfs.h>
#include <net/xfrm.h>
#include "netdevsim.h"
#define NSIM_IPSEC_AUTH_BITS 128
static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
struct netdevsim *ns = filp->private_data;
struct nsim_ipsec *ipsec = &ns->ipsec;
size_t bufsize;
char *buf, *p;
int len;
int i;
	/* the buffer needed is roughly
	 * (num SAs * 3 lines each * ~60 bytes per line) + a header line;
	 * allocate 4 lines' worth per SA to leave headroom
	 */
bufsize = (ipsec->count * 4 * 60) + 60;
buf = kzalloc(bufsize, GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += scnprintf(p, bufsize - (p - buf),
"SA count=%u tx=%u\n",
ipsec->count, ipsec->tx);
for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
struct nsim_sa *sap = &ipsec->sa[i];
if (!sap->used)
continue;
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
i, be32_to_cpu(sap->xs->id.spi),
sap->xs->id.proto, sap->salt, sap->crypt);
p += scnprintf(p, bufsize - (p - buf),
"sa[%i] key=0x%08x %08x %08x %08x\n",
i, sap->key[0], sap->key[1],
sap->key[2], sap->key[3]);
}
len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
kfree(buf);
return len;
}
static const struct file_operations ipsec_dbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = nsim_dbg_netdev_ops_read,
};
static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
{
u32 i;
if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT)
return -ENOSPC;
/* search sa table */
for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
if (!ipsec->sa[i].used)
return i;
}
return -ENOSPC;
}
static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
if (!xs->aead) {
netdev_err(dev, "Unsupported IPsec algorithm\n");
return -EINVAL;
}
if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) {
netdev_err(dev, "IPsec offload requires %d bit authentication\n",
NSIM_IPSEC_AUTH_BITS);
return -EINVAL;
}
key_data = &xs->aead->alg_key[0];
key_len = xs->aead->alg_key_len;
alg_name = xs->aead->alg_name;
if (strcmp(alg_name, aes_gcm_name)) {
netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
aes_gcm_name);
return -EINVAL;
}
	/* A 160-bit key blob holds a 16-byte key plus a 4-byte salt. */
if (key_len > NSIM_IPSEC_AUTH_BITS) {
*mysalt = ((u32 *)key_data)[4];
} else if (key_len == NSIM_IPSEC_AUTH_BITS) {
*mysalt = 0;
} else {
netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n");
return -EINVAL;
}
memcpy(mykey, key_data, 16);
return 0;
}
static int nsim_ipsec_add_sa(struct xfrm_state *xs,
struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
struct net_device *dev;
struct netdevsim *ns;
struct nsim_sa sa;
u16 sa_idx;
int ret;
dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->calg) {
NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
/* find the first unused index */
ret = nsim_ipsec_find_empty_idx(ipsec);
if (ret < 0) {
NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
memset(&sa, 0, sizeof(sa));
sa.used = true;
sa.xs = xs;
if (sa.xs->id.proto & IPPROTO_ESP)
sa.crypt = xs->ealg || xs->aead;
/* get the key and salt */
ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
}
if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa.rx = true;
if (xs->props.family == AF_INET6)
memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
else
memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
}
/* the preparations worked, so save the info */
memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
/* the XFRM stack doesn't like offload_handle == 0,
* so add a bitflag in case our array index is 0
*/
xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID;
ipsec->count++;
return 0;
}
static void nsim_ipsec_del_sa(struct xfrm_state *xs)
{
struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
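	/* Strip the validity flag to recover the SA table index. */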
sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
if (!ipsec->sa[sa_idx].used) {
netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n",
sa_idx);
return;
}
memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa));
ipsec->count--;
}
static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
ipsec->ok++;
return true;
}
static const struct xfrmdev_ops nsim_xfrmdev_ops = {
.xdo_dev_state_add = nsim_ipsec_add_sa,
.xdo_dev_state_delete = nsim_ipsec_del_sa,
.xdo_dev_offload_ok = nsim_ipsec_offload_ok,
};
bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
{
struct sec_path *sp = skb_sec_path(skb);
struct nsim_ipsec *ipsec = &ns->ipsec;
struct xfrm_state *xs;
struct nsim_sa *tsa;
u32 sa_idx;
/* do we even need to check this packet? */
if (!sp)
return true;
if (unlikely(!sp->len)) {
netdev_err(ns->netdev, "no xfrm state len = %d\n",
sp->len);
return false;
}
xs = xfrm_input_state(skb);
if (unlikely(!xs)) {
netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs);
return false;
}
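	/* Recover the SA index from the offload handle assigned at add time. */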
sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) {
netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n",
sa_idx, NSIM_IPSEC_MAX_SA_COUNT);
return false;
}
tsa = &ipsec->sa[sa_idx];
if (unlikely(!tsa->used)) {
netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx);
return false;
}
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto);
return false;
}
ipsec->tx++;
return true;
}
void nsim_ipsec_init(struct netdevsim *ns)
{
ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops;
#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \
NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
ns->netdev->features |= NSIM_ESP_FEATURES;
ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
ns->ipsec.pfile = debugfs_create_file("ipsec", 0400,
ns->nsim_dev_port->ddir, ns,
&ipsec_dbg_fops);
}
void nsim_ipsec_teardown(struct netdevsim *ns)
{
struct nsim_ipsec *ipsec = &ns->ipsec;
if (ipsec->count)
netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n",
ipsec->count);
debugfs_remove_recursive(ipsec->pfile);
}
|
linux-master
|
drivers/net/netdevsim/ipsec.c
|
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2017 Netronome Systems, Inc.
* Copyright (C) 2019 Mellanox Technologies. All rights reserved
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "netdevsim.h"
static DEFINE_IDA(nsim_bus_dev_ids);
static LIST_HEAD(nsim_bus_dev_list);
static DEFINE_MUTEX(nsim_bus_dev_list_lock);
static bool nsim_bus_enable;
static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
{
return container_of(dev, struct nsim_bus_dev, dev);
}
static ssize_t
nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
unsigned int num_vfs;
int ret;
ret = kstrtouint(buf, 0, &num_vfs);
if (ret)
return ret;
device_lock(dev);
ret = -ENOENT;
if (dev_get_drvdata(dev))
ret = nsim_drv_configure_vfs(nsim_bus_dev, num_vfs);
device_unlock(dev);
return ret ? ret : count;
}
static ssize_t
nsim_bus_dev_numvfs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs);
}
static struct device_attribute nsim_bus_dev_numvfs_attr =
__ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
nsim_bus_dev_numvfs_store);
static ssize_t
new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
unsigned int port_index;
int ret;
	/* Prevent use of nsim_bus_dev before it is initialized. */
if (!smp_load_acquire(&nsim_bus_dev->init))
return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
return ret ? ret : count;
}
static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port);
static ssize_t
del_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
unsigned int port_index;
int ret;
	/* Prevent use of nsim_bus_dev before it is initialized. */
if (!smp_load_acquire(&nsim_bus_dev->init))
return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
return ret ? ret : count;
}
static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port);
static struct attribute *nsim_bus_dev_attrs[] = {
&nsim_bus_dev_numvfs_attr.attr,
&nsim_bus_dev_new_port_attr.attr,
&nsim_bus_dev_del_port_attr.attr,
NULL,
};
static const struct attribute_group nsim_bus_dev_attr_group = {
.attrs = nsim_bus_dev_attrs,
};
static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
&nsim_bus_dev_attr_group,
NULL,
};
static void nsim_bus_dev_release(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev;
nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev);
kfree(nsim_bus_dev);
}
static struct device_type nsim_bus_dev_type = {
.groups = nsim_bus_dev_attr_groups,
.release = nsim_bus_dev_release,
};
static struct nsim_bus_dev *
nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues);
static ssize_t
new_device_store(const struct bus_type *bus, const char *buf, size_t count)
{
unsigned int id, port_count, num_queues;
struct nsim_bus_dev *nsim_bus_dev;
int err;
err = sscanf(buf, "%u %u %u", &id, &port_count, &num_queues);
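	/* sscanf() returns how many fields were converted; missing trailing
	 * fields fall through to their defaults below.
	 */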
switch (err) {
case 1:
port_count = 1;
fallthrough;
case 2:
num_queues = 1;
fallthrough;
case 3:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
return -EINVAL;
}
break;
default:
pr_err("Format for adding new device is \"id port_count num_queues\" (uint uint unit).\n");
return -EINVAL;
}
mutex_lock(&nsim_bus_dev_list_lock);
	/* Prevent use of the resource before initialization. */
if (!smp_load_acquire(&nsim_bus_enable)) {
err = -EBUSY;
goto err;
}
nsim_bus_dev = nsim_bus_dev_new(id, port_count, num_queues);
if (IS_ERR(nsim_bus_dev)) {
err = PTR_ERR(nsim_bus_dev);
goto err;
}
/* Allow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, true);
list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
mutex_unlock(&nsim_bus_dev_list_lock);
return count;
err:
mutex_unlock(&nsim_bus_dev_list_lock);
return err;
}
static BUS_ATTR_WO(new_device);
static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev);
static ssize_t
del_device_store(const struct bus_type *bus, const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev, *tmp;
unsigned int id;
int err;
err = sscanf(buf, "%u", &id);
switch (err) {
case 1:
if (id > INT_MAX) {
pr_err("Value of \"id\" is too big.\n");
return -EINVAL;
}
break;
default:
pr_err("Format for deleting device is \"id\" (uint).\n");
return -EINVAL;
}
err = -ENOENT;
mutex_lock(&nsim_bus_dev_list_lock);
	/* Prevent use of the resource before initialization. */
if (!smp_load_acquire(&nsim_bus_enable)) {
mutex_unlock(&nsim_bus_dev_list_lock);
return -EBUSY;
}
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
if (nsim_bus_dev->dev.id != id)
continue;
list_del(&nsim_bus_dev->list);
nsim_bus_dev_del(nsim_bus_dev);
err = 0;
break;
}
mutex_unlock(&nsim_bus_dev_list_lock);
return !err ? count : err;
}
static BUS_ATTR_WO(del_device);
static struct attribute *nsim_bus_attrs[] = {
&bus_attr_new_device.attr,
&bus_attr_del_device.attr,
NULL
};
ATTRIBUTE_GROUPS(nsim_bus);
static int nsim_bus_probe(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
return nsim_drv_probe(nsim_bus_dev);
}
static void nsim_bus_remove(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
nsim_drv_remove(nsim_bus_dev);
}
static int nsim_num_vf(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
return nsim_bus_dev->num_vfs;
}
static struct bus_type nsim_bus = {
.name = DRV_NAME,
.dev_name = DRV_NAME,
.bus_groups = nsim_bus_groups,
.probe = nsim_bus_probe,
.remove = nsim_bus_remove,
.num_vf = nsim_num_vf,
};
#define NSIM_BUS_DEV_MAX_VFS 4
static struct nsim_bus_dev *
nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queues)
{
struct nsim_bus_dev *nsim_bus_dev;
int err;
nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL);
if (!nsim_bus_dev)
return ERR_PTR(-ENOMEM);
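	/* Reserve exactly the requested id; ida_alloc_range() returns it on
	 * success or a negative error if it cannot be reserved.
	 */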
err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL);
if (err < 0)
goto err_nsim_bus_dev_free;
nsim_bus_dev->dev.id = err;
nsim_bus_dev->dev.bus = &nsim_bus;
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
nsim_bus_dev->num_queues = num_queues;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
err = device_register(&nsim_bus_dev->dev);
if (err)
goto err_nsim_bus_dev_id_free;
return nsim_bus_dev;
err_nsim_bus_dev_id_free:
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
put_device(&nsim_bus_dev->dev);
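	/* put_device() ends up in nsim_bus_dev_release(), which frees the
	 * structure; clear the pointer so the kfree() below is a no-op.
	 */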
nsim_bus_dev = NULL;
err_nsim_bus_dev_free:
kfree(nsim_bus_dev);
return ERR_PTR(err);
}
static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
{
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
device_unregister(&nsim_bus_dev->dev);
}
static struct device_driver nsim_driver = {
.name = DRV_NAME,
.bus = &nsim_bus,
.owner = THIS_MODULE,
};
int nsim_bus_init(void)
{
int err;
err = bus_register(&nsim_bus);
if (err)
return err;
err = driver_register(&nsim_driver);
if (err)
goto err_bus_unregister;
/* Allow using resources */
smp_store_release(&nsim_bus_enable, true);
return 0;
err_bus_unregister:
bus_unregister(&nsim_bus);
return err;
}
void nsim_bus_exit(void)
{
struct nsim_bus_dev *nsim_bus_dev, *tmp;
/* Disallow using resources */
smp_store_release(&nsim_bus_enable, false);
mutex_lock(&nsim_bus_dev_list_lock);
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
list_del(&nsim_bus_dev->list);
nsim_bus_dev_del(nsim_bus_dev);
}
mutex_unlock(&nsim_bus_dev_list_lock);
driver_unregister(&nsim_driver);
bus_unregister(&nsim_bus);
}
|
linux-master
|
drivers/net/netdevsim/bus.c
|
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.
#include <linux/debugfs.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/udp_tunnel.h>
#include "netdevsim.h"
static int
nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct netdevsim *ns = netdev_priv(dev);
int ret;
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
if (ns->udp_ports.sleep)
msleep(ns->udp_ports.sleep);
if (!ret) {
if (ns->udp_ports.ports[table][entry]) {
WARN(1, "entry already in use\n");
ret = -EBUSY;
} else {
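			/* Record the port number in the upper 16 bits and the
			 * tunnel type in the lower 16 bits.
			 */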
ns->udp_ports.ports[table][entry] =
be16_to_cpu(ti->port) << 16 | ti->type;
}
}
netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n",
table, entry, ti->type, ti->sa_family, ntohs(ti->port),
ret);
return ret;
}
static int
nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct netdevsim *ns = netdev_priv(dev);
int ret;
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
if (ns->udp_ports.sleep)
msleep(ns->udp_ports.sleep);
if (!ret) {
u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
if (val == ns->udp_ports.ports[table][entry]) {
ns->udp_ports.ports[table][entry] = 0;
} else {
WARN(1, "entry not installed %x vs %x\n",
val, ns->udp_ports.ports[table][entry]);
ret = -ENOENT;
}
}
netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n",
table, entry, ti->type, ti->sa_family, ntohs(ti->port),
ret);
return ret;
}
static int
nsim_udp_tunnel_sync_table(struct net_device *dev, unsigned int table)
{
struct netdevsim *ns = netdev_priv(dev);
struct udp_tunnel_info ti;
unsigned int i;
int ret;
ret = -ns->udp_ports.inject_error;
ns->udp_ports.inject_error = 0;
for (i = 0; i < NSIM_UDP_TUNNEL_N_PORTS; i++) {
udp_tunnel_nic_get_port(dev, table, i, &ti);
ns->udp_ports.ports[table][i] =
be16_to_cpu(ti.port) << 16 | ti.type;
}
return ret;
}
static const struct udp_tunnel_nic_info nsim_udp_tunnel_info = {
.set_port = nsim_udp_tunnel_set_port,
.unset_port = nsim_udp_tunnel_unset_port,
.sync_table = nsim_udp_tunnel_sync_table,
.tables = {
{
.n_entries = NSIM_UDP_TUNNEL_N_PORTS,
.tunnel_types = UDP_TUNNEL_TYPE_VXLAN,
},
{
.n_entries = NSIM_UDP_TUNNEL_N_PORTS,
.tunnel_types = UDP_TUNNEL_TYPE_GENEVE |
UDP_TUNNEL_TYPE_VXLAN_GPE,
},
},
};
static ssize_t
nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
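	/* ports may point at either the local or the device-shared table,
	 * but both have the same shape as __ports, so use that for the size.
	 */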
memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
rtnl_lock();
udp_tunnel_nic_reset_ntf(dev);
rtnl_unlock();
return count;
}
static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
.open = simple_open,
.write = nsim_udp_tunnels_info_reset_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
struct udp_tunnel_nic_info *info;
if (nsim_dev->udp_ports.shared && nsim_dev->udp_ports.open_only) {
dev_err(&nsim_dev->nsim_bus_dev->dev,
"shared can't be used in conjunction with open_only\n");
return -EINVAL;
}
if (!nsim_dev->udp_ports.shared)
ns->udp_ports.ports = ns->udp_ports.__ports;
else
ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
debugfs_create_u32("udp_ports_inject_error", 0600,
ns->nsim_dev_port->ddir,
&ns->udp_ports.inject_error);
ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
debugfs_create_u32_array("udp_ports_table0", 0400,
ns->nsim_dev_port->ddir,
&ns->udp_ports.dfs_ports[0]);
ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
debugfs_create_u32_array("udp_ports_table1", 0400,
ns->nsim_dev_port->ddir,
&ns->udp_ports.dfs_ports[1]);
debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
dev, &nsim_udp_tunnels_info_reset_fops);
/* Note: it's not normal to allocate the info struct like this!
* Drivers are expected to use a static const one, here we're testing.
*/
info = kmemdup(&nsim_udp_tunnel_info, sizeof(nsim_udp_tunnel_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
ns->udp_ports.sleep = nsim_dev->udp_ports.sleep;
if (nsim_dev->udp_ports.sync_all) {
info->set_port = NULL;
info->unset_port = NULL;
} else {
info->sync_table = NULL;
}
if (ns->udp_ports.sleep)
info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
if (nsim_dev->udp_ports.open_only)
info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
if (nsim_dev->udp_ports.ipv4_only)
info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY;
if (nsim_dev->udp_ports.shared)
info->shared = &nsim_dev->udp_ports.utn_shared;
if (nsim_dev->udp_ports.static_iana_vxlan)
info->flags |= UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
dev->udp_tunnel_nic_info = info;
return 0;
}
void nsim_udp_tunnels_info_destroy(struct net_device *dev)
{
kfree(dev->udp_tunnel_nic_info);
dev->udp_tunnel_nic_info = NULL;
}
void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
{
debugfs_create_bool("udp_ports_sync_all", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.sync_all);
debugfs_create_bool("udp_ports_open_only", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.open_only);
debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.ipv4_only);
debugfs_create_bool("udp_ports_shared", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.shared);
debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.static_iana_vxlan);
debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.sleep);
}
|
linux-master
|
drivers/net/netdevsim/udp_tunnels.c
|
/*
* Copyright (c) 2018 Cumulus Networks. All rights reserved.
* Copyright (c) 2018 David Ahern <[email protected]>
*
* This software is licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree.
*
* THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
* WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
#include <linux/bitmap.h>
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <net/fib_notifier.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/net_namespace.h>
#include <net/nexthop.h>
#include <linux/debugfs.h>
#include "netdevsim.h"
struct nsim_fib_entry {
u64 max;
atomic64_t num;
};
struct nsim_per_fib_data {
struct nsim_fib_entry fib;
struct nsim_fib_entry rules;
};
struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
struct nsim_fib_entry nexthops;
struct rhashtable fib_rt_ht;
struct list_head fib_rt_list;
struct mutex fib_lock; /* Protects FIB HT and list */
struct notifier_block nexthop_nb;
struct rhashtable nexthop_ht;
struct devlink *devlink;
struct work_struct fib_event_work;
struct work_struct fib_flush_work;
struct list_head fib_event_queue;
spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
struct mutex nh_lock; /* Protects NH HT */
struct dentry *ddir;
bool fail_route_offload;
bool fail_res_nexthop_group_replace;
bool fail_nexthop_bucket_replace;
bool fail_route_delete;
};
struct nsim_fib_rt_key {
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
int family;
u32 tb_id;
};
struct nsim_fib_rt {
struct nsim_fib_rt_key key;
struct rhash_head ht_node;
struct list_head list; /* Member of fib_rt_list */
};
struct nsim_fib4_rt {
struct nsim_fib_rt common;
struct fib_info *fi;
dscp_t dscp;
u8 type;
};
struct nsim_fib6_rt {
struct nsim_fib_rt common;
struct list_head nh_list;
unsigned int nhs;
};
struct nsim_fib6_rt_nh {
struct list_head list; /* Member of nh_list */
struct fib6_info *rt;
};
struct nsim_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
struct nsim_fib_event {
struct list_head list; /* node in fib queue */
union {
struct fib_entry_notifier_info fen_info;
struct nsim_fib6_event fib6_event;
};
struct nsim_fib_data *data;
unsigned long event;
int family;
};
static const struct rhashtable_params nsim_fib_rt_ht_params = {
.key_offset = offsetof(struct nsim_fib_rt, key),
.head_offset = offsetof(struct nsim_fib_rt, ht_node),
.key_len = sizeof(struct nsim_fib_rt_key),
.automatic_shrinking = true,
};
struct nsim_nexthop {
struct rhash_head ht_node;
u64 occ;
u32 id;
bool is_resilient;
};
static const struct rhashtable_params nsim_nexthop_ht_params = {
.key_offset = offsetof(struct nsim_nexthop, id),
.head_offset = offsetof(struct nsim_nexthop, ht_node),
.key_len = sizeof(u32),
.automatic_shrinking = true,
};
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max)
{
struct nsim_fib_entry *entry;
switch (res_id) {
case NSIM_RESOURCE_IPV4_FIB:
entry = &fib_data->ipv4.fib;
break;
case NSIM_RESOURCE_IPV4_FIB_RULES:
entry = &fib_data->ipv4.rules;
break;
case NSIM_RESOURCE_IPV6_FIB:
entry = &fib_data->ipv6.fib;
break;
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
case NSIM_RESOURCE_NEXTHOPS:
entry = &fib_data->nexthops;
break;
default:
return 0;
}
return max ? entry->max : atomic64_read(&entry->num);
}
static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, u64 val)
{
struct nsim_fib_entry *entry;
switch (res_id) {
case NSIM_RESOURCE_IPV4_FIB:
entry = &fib_data->ipv4.fib;
break;
case NSIM_RESOURCE_IPV4_FIB_RULES:
entry = &fib_data->ipv4.rules;
break;
case NSIM_RESOURCE_IPV6_FIB:
entry = &fib_data->ipv6.fib;
break;
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
case NSIM_RESOURCE_NEXTHOPS:
entry = &fib_data->nexthops;
break;
default:
WARN_ON(1);
return;
}
entry->max = val;
}
static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
struct netlink_ext_ack *extack)
{
int err = 0;
if (add) {
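		/* atomic64_add_unless() refuses the increment once the max is
		 * reached, enforcing the capacity without taking a lock.
		 */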
if (!atomic64_add_unless(&entry->num, 1, entry->max)) {
err = -ENOSPC;
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries");
}
} else {
atomic64_dec_if_positive(&entry->num);
}
return err;
}
static int nsim_fib_rule_event(struct nsim_fib_data *data,
struct fib_notifier_info *info, bool add)
{
struct netlink_ext_ack *extack = info->extack;
int err = 0;
switch (info->family) {
case AF_INET:
err = nsim_fib_rule_account(&data->ipv4.rules, add, extack);
break;
case AF_INET6:
err = nsim_fib_rule_account(&data->ipv6.rules, add, extack);
break;
}
return err;
}
static int nsim_fib_account(struct nsim_fib_entry *entry, bool add)
{
int err = 0;
if (add) {
if (!atomic64_add_unless(&entry->num, 1, entry->max))
err = -ENOSPC;
} else {
atomic64_dec_if_positive(&entry->num);
}
return err;
}
static void nsim_fib_rt_init(struct nsim_fib_data *data,
struct nsim_fib_rt *fib_rt, const void *addr,
size_t addr_len, unsigned int prefix_len,
int family, u32 tb_id)
{
memcpy(fib_rt->key.addr, addr, addr_len);
fib_rt->key.prefix_len = prefix_len;
fib_rt->key.family = family;
fib_rt->key.tb_id = tb_id;
list_add(&fib_rt->list, &data->fib_rt_list);
}
static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt)
{
list_del(&fib_rt->list);
}
static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht,
const void *addr, size_t addr_len,
unsigned int prefix_len,
int family, u32 tb_id)
{
struct nsim_fib_rt_key key;
memset(&key, 0, sizeof(key));
memcpy(key.addr, addr, addr_len);
key.prefix_len = prefix_len;
key.family = family;
key.tb_id = tb_id;
return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params);
}
static struct nsim_fib4_rt *
nsim_fib4_rt_create(struct nsim_fib_data *data,
struct fib_entry_notifier_info *fen_info)
{
struct nsim_fib4_rt *fib4_rt;
fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_KERNEL);
if (!fib4_rt)
return NULL;
nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32),
fen_info->dst_len, AF_INET, fen_info->tb_id);
fib4_rt->fi = fen_info->fi;
fib_info_hold(fib4_rt->fi);
fib4_rt->dscp = fen_info->dscp;
fib4_rt->type = fen_info->type;
return fib4_rt;
}
static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt)
{
fib_info_put(fib4_rt->fi);
nsim_fib_rt_fini(&fib4_rt->common);
kfree(fib4_rt);
}
static struct nsim_fib4_rt *
nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
const struct fib_entry_notifier_info *fen_info)
{
struct nsim_fib_rt *fib_rt;
fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32),
fen_info->dst_len, AF_INET,
fen_info->tb_id);
if (!fib_rt)
return NULL;
return container_of(fib_rt, struct nsim_fib4_rt, common);
}
static void
nsim_fib4_rt_offload_failed_flag_set(struct net *net,
struct fib_entry_notifier_info *fen_info)
{
u32 *p_dst = (u32 *)&fen_info->dst;
struct fib_rt_info fri;
fri.fi = fen_info->fi;
fri.tb_id = fen_info->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = fen_info->dst_len;
fri.dscp = fen_info->dscp;
fri.type = fen_info->type;
fri.offload = false;
fri.trap = false;
fri.offload_failed = true;
fib_alias_hw_flags_set(net, &fri);
}
static void nsim_fib4_rt_hw_flags_set(struct net *net,
const struct nsim_fib4_rt *fib4_rt,
bool trap)
{
u32 *p_dst = (u32 *) fib4_rt->common.key.addr;
int dst_len = fib4_rt->common.key.prefix_len;
struct fib_rt_info fri;
fri.fi = fib4_rt->fi;
fri.tb_id = fib4_rt->common.key.tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
fri.dscp = fib4_rt->dscp;
fri.type = fib4_rt->type;
fri.offload = false;
fri.trap = trap;
fri.offload_failed = false;
fib_alias_hw_flags_set(net, &fri);
}
static int nsim_fib4_rt_add(struct nsim_fib_data *data,
struct nsim_fib4_rt *fib4_rt)
{
struct net *net = devlink_net(data->devlink);
int err;
err = rhashtable_insert_fast(&data->fib_rt_ht,
&fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
if (err)
goto err_fib_dismiss;
/* Simulate hardware programming latency. */
msleep(1);
nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
return 0;
err_fib_dismiss:
/* Drop the accounting that was increased from the notification
* context when FIB_EVENT_ENTRY_REPLACE was triggered.
*/
nsim_fib_account(&data->ipv4.fib, false);
return err;
}
static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
struct nsim_fib4_rt *fib4_rt,
struct nsim_fib4_rt *fib4_rt_old)
{
struct net *net = devlink_net(data->devlink);
int err;
/* We are replacing a route, so need to remove the accounting which
* was increased when FIB_EVENT_ENTRY_REPLACE was triggered.
*/
err = nsim_fib_account(&data->ipv4.fib, false);
if (err)
return err;
err = rhashtable_replace_fast(&data->fib_rt_ht,
&fib4_rt_old->common.ht_node,
&fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
if (err)
return err;
msleep(1);
nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
nsim_fib4_rt_destroy(fib4_rt_old);
return 0;
}
static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
struct fib_entry_notifier_info *fen_info)
{
struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
int err;
if (data->fail_route_offload) {
		/* For testing purposes, the user set the debugfs
		 * fail_route_offload value to true. Simulate hardware
		 * programming latency and then fail.
		 */
msleep(1);
return -EINVAL;
}
fib4_rt = nsim_fib4_rt_create(data, fen_info);
if (!fib4_rt)
return -ENOMEM;
fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
if (!fib4_rt_old)
err = nsim_fib4_rt_add(data, fib4_rt);
else
err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old);
if (err)
nsim_fib4_rt_destroy(fib4_rt);
return err;
}
static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
const struct fib_entry_notifier_info *fen_info)
{
struct nsim_fib4_rt *fib4_rt;
fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
if (!fib4_rt)
return;
rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
nsim_fib_rt_ht_params);
nsim_fib4_rt_destroy(fib4_rt);
}
static int nsim_fib4_event(struct nsim_fib_data *data,
struct fib_entry_notifier_info *fen_info,
unsigned long event)
{
int err = 0;
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib4_rt_insert(data, fen_info);
if (err) {
struct net *net = devlink_net(data->devlink);
nsim_fib4_rt_offload_failed_flag_set(net, fen_info);
}
break;
case FIB_EVENT_ENTRY_DEL:
nsim_fib4_rt_remove(data, fen_info);
break;
default:
break;
}
return err;
}
static struct nsim_fib6_rt_nh *
nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt,
const struct fib6_info *rt)
{
struct nsim_fib6_rt_nh *fib6_rt_nh;
list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) {
if (fib6_rt_nh->rt == rt)
return fib6_rt_nh;
}
return NULL;
}
static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
struct fib6_info *rt)
{
struct nsim_fib6_rt_nh *fib6_rt_nh;
fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_KERNEL);
if (!fib6_rt_nh)
return -ENOMEM;
fib6_info_hold(rt);
fib6_rt_nh->rt = rt;
list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list);
fib6_rt->nhs++;
return 0;
}
#if IS_ENABLED(CONFIG_IPV6)
static void nsim_rt6_release(struct fib6_info *rt)
{
fib6_info_release(rt);
}
#else
static void nsim_rt6_release(struct fib6_info *rt)
{
}
#endif
static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
const struct fib6_info *rt)
{
struct nsim_fib6_rt_nh *fib6_rt_nh;
fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
if (!fib6_rt_nh)
return;
fib6_rt->nhs--;
list_del(&fib6_rt_nh->list);
nsim_rt6_release(fib6_rt_nh->rt);
kfree(fib6_rt_nh);
}
static struct nsim_fib6_rt *
nsim_fib6_rt_create(struct nsim_fib_data *data,
struct fib6_info **rt_arr, unsigned int nrt6)
{
struct fib6_info *rt = rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
int i = 0;
int err;
fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_KERNEL);
if (!fib6_rt)
return ERR_PTR(-ENOMEM);
nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr,
sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6,
rt->fib6_table->tb6_id);
/* We consider a multipath IPv6 route as one entry, but it can be made
* up from several fib6_info structs (one for each nexthop), so we
* add them all to the same list under the entry.
*/
INIT_LIST_HEAD(&fib6_rt->nh_list);
for (i = 0; i < nrt6; i++) {
err = nsim_fib6_rt_nh_add(fib6_rt, rt_arr[i]);
if (err)
goto err_fib6_rt_nh_del;
}
return fib6_rt;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
nsim_fib6_rt_nh_del(fib6_rt, rt_arr[i]);
}
nsim_fib_rt_fini(&fib6_rt->common);
kfree(fib6_rt);
return ERR_PTR(err);
}
static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt)
{
struct nsim_fib6_rt_nh *iter, *tmp;
list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list));
nsim_fib_rt_fini(&fib6_rt->common);
kfree(fib6_rt);
}
static struct nsim_fib6_rt *
nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
{
struct nsim_fib_rt *fib_rt;
fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr,
sizeof(rt->fib6_dst.addr),
rt->fib6_dst.plen, AF_INET6,
rt->fib6_table->tb6_id);
if (!fib_rt)
return NULL;
return container_of(fib_rt, struct nsim_fib6_rt, common);
}
static int nsim_fib6_rt_append(struct nsim_fib_data *data,
struct nsim_fib6_event *fib6_event)
{
struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
int i, err;
if (data->fail_route_offload) {
		/* For testing purposes, the user set the debugfs
		 * fail_route_offload value to true. Simulate hardware
		 * programming latency and then fail.
		 */
msleep(1);
return -EINVAL;
}
fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
if (!fib6_rt)
return -EINVAL;
for (i = 0; i < fib6_event->nrt6; i++) {
err = nsim_fib6_rt_nh_add(fib6_rt, fib6_event->rt_arr[i]);
if (err)
goto err_fib6_rt_nh_del;
WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
}
#if IS_ENABLED(CONFIG_IPV6)
static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data,
struct fib6_info **rt_arr,
unsigned int nrt6)
{
struct net *net = devlink_net(data->devlink);
int i;
for (i = 0; i < nrt6; i++)
fib6_info_hw_flags_set(net, rt_arr[i], false, false, true);
}
#else
static void nsim_fib6_rt_offload_failed_flag_set(struct nsim_fib_data *data,
struct fib6_info **rt_arr,
unsigned int nrt6)
{
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data,
const struct nsim_fib6_rt *fib6_rt,
bool trap)
{
struct net *net = devlink_net(data->devlink);
struct nsim_fib6_rt_nh *fib6_rt_nh;
list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
fib6_info_hw_flags_set(net, fib6_rt_nh->rt, false, trap, false);
}
#else
static void nsim_fib6_rt_hw_flags_set(struct nsim_fib_data *data,
const struct nsim_fib6_rt *fib6_rt,
bool trap)
{
}
#endif
static int nsim_fib6_rt_add(struct nsim_fib_data *data,
struct nsim_fib6_rt *fib6_rt)
{
int err;
err = rhashtable_insert_fast(&data->fib_rt_ht,
&fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
if (err)
goto err_fib_dismiss;
msleep(1);
nsim_fib6_rt_hw_flags_set(data, fib6_rt, true);
return 0;
err_fib_dismiss:
/* Drop the accounting that was increased from the notification
* context when FIB_EVENT_ENTRY_REPLACE was triggered.
*/
nsim_fib_account(&data->ipv6.fib, false);
return err;
}
static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
struct nsim_fib6_rt *fib6_rt,
struct nsim_fib6_rt *fib6_rt_old)
{
int err;
/* We are replacing a route, so need to remove the accounting which
* was increased when FIB_EVENT_ENTRY_REPLACE was triggered.
*/
err = nsim_fib_account(&data->ipv6.fib, false);
if (err)
return err;
err = rhashtable_replace_fast(&data->fib_rt_ht,
&fib6_rt_old->common.ht_node,
&fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
if (err)
return err;
msleep(1);
nsim_fib6_rt_hw_flags_set(data, fib6_rt, true);
nsim_fib6_rt_hw_flags_set(data, fib6_rt_old, false);
nsim_fib6_rt_destroy(fib6_rt_old);
return 0;
}
static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
struct nsim_fib6_event *fib6_event)
{
struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
int err;
if (data->fail_route_offload) {
		/* For testing purposes, the user set the debugfs
		 * fail_route_offload value to true. Simulate hardware
		 * programming latency and then fail.
		 */
msleep(1);
return -EINVAL;
}
fib6_rt = nsim_fib6_rt_create(data, fib6_event->rt_arr,
fib6_event->nrt6);
if (IS_ERR(fib6_rt))
return PTR_ERR(fib6_rt);
fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
if (!fib6_rt_old)
err = nsim_fib6_rt_add(data, fib6_rt);
else
err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old);
if (err)
nsim_fib6_rt_destroy(fib6_rt);
return err;
}
static void nsim_fib6_rt_remove(struct nsim_fib_data *data,
struct nsim_fib6_event *fib6_event)
{
struct fib6_info *rt = fib6_event->rt_arr[0];
struct nsim_fib6_rt *fib6_rt;
int i;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
* notification for a route we do not have. Therefore, do not warn if
* route was not found.
*/
fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
if (!fib6_rt)
return;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (fib6_event->nrt6 != fib6_rt->nhs) {
for (i = 0; i < fib6_event->nrt6; i++)
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
return;
}
rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
nsim_fib_rt_ht_params);
nsim_fib6_rt_destroy(fib6_rt);
}
static int nsim_fib6_event_init(struct nsim_fib6_event *fib6_event,
struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
struct fib6_info *iter;
unsigned int nrt6;
int i = 0;
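	/* The notified route plus its siblings together form one multipath
	 * entry.
	 */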
nrt6 = fen6_info->nsiblings + 1;
rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
if (!rt_arr)
return -ENOMEM;
fib6_event->rt_arr = rt_arr;
fib6_event->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
if (!fen6_info->nsiblings)
return 0;
list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
if (i == fen6_info->nsiblings)
break;
rt_arr[i + 1] = iter;
fib6_info_hold(iter);
i++;
}
WARN_ON_ONCE(i != fen6_info->nsiblings);
return 0;
}
static void nsim_fib6_event_fini(struct nsim_fib6_event *fib6_event)
{
int i;
for (i = 0; i < fib6_event->nrt6; i++)
nsim_rt6_release(fib6_event->rt_arr[i]);
kfree(fib6_event->rt_arr);
}
static int nsim_fib6_event(struct nsim_fib_data *data,
struct nsim_fib6_event *fib6_event,
unsigned long event)
{
int err;
if (fib6_event->rt_arr[0]->fib6_src.plen)
return 0;
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib6_rt_insert(data, fib6_event);
if (err)
goto err_rt_offload_failed_flag_set;
break;
case FIB_EVENT_ENTRY_APPEND:
err = nsim_fib6_rt_append(data, fib6_event);
if (err)
goto err_rt_offload_failed_flag_set;
break;
case FIB_EVENT_ENTRY_DEL:
nsim_fib6_rt_remove(data, fib6_event);
break;
default:
break;
}
return 0;
err_rt_offload_failed_flag_set:
nsim_fib6_rt_offload_failed_flag_set(data, fib6_event->rt_arr,
fib6_event->nrt6);
return err;
}
static void nsim_fib_event(struct nsim_fib_event *fib_event)
{
switch (fib_event->family) {
case AF_INET:
nsim_fib4_event(fib_event->data, &fib_event->fen_info,
fib_event->event);
fib_info_put(fib_event->fen_info.fi);
break;
case AF_INET6:
nsim_fib6_event(fib_event->data, &fib_event->fib6_event,
fib_event->event);
nsim_fib6_event_fini(&fib_event->fib6_event);
break;
}
}
static int nsim_fib4_prepare_event(struct fib_notifier_info *info,
struct nsim_fib_event *fib_event,
unsigned long event)
{
struct nsim_fib_data *data = fib_event->data;
struct fib_entry_notifier_info *fen_info;
struct netlink_ext_ack *extack;
int err = 0;
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
fib_event->fen_info = *fen_info;
extack = info->extack;
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib_account(&data->ipv4.fib, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
return err;
}
break;
case FIB_EVENT_ENTRY_DEL:
if (data->fail_route_delete) {
NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
return -EINVAL;
}
nsim_fib_account(&data->ipv4.fib, false);
break;
}
/* Take reference on fib_info to prevent it from being
* freed while event is queued. Release it afterwards.
*/
fib_info_hold(fib_event->fen_info.fi);
return 0;
}
static int nsim_fib6_prepare_event(struct fib_notifier_info *info,
struct nsim_fib_event *fib_event,
unsigned long event)
{
struct nsim_fib_data *data = fib_event->data;
struct fib6_entry_notifier_info *fen6_info;
struct netlink_ext_ack *extack;
int err = 0;
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
err = nsim_fib6_event_init(&fib_event->fib6_event, fen6_info);
if (err)
return err;
extack = info->extack;
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib_account(&data->ipv6.fib, true);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
goto err_fib6_event_fini;
}
break;
case FIB_EVENT_ENTRY_DEL:
if (data->fail_route_delete) {
err = -EINVAL;
NL_SET_ERR_MSG_MOD(extack, "Failed to process route deletion");
goto err_fib6_event_fini;
}
nsim_fib_account(&data->ipv6.fib, false);
break;
}
return 0;
err_fib6_event_fini:
nsim_fib6_event_fini(&fib_event->fib6_event);
return err;
}
static int nsim_fib_event_schedule_work(struct nsim_fib_data *data,
struct fib_notifier_info *info,
unsigned long event)
{
struct nsim_fib_event *fib_event;
int err;
if (info->family != AF_INET && info->family != AF_INET6)
/* netdevsim does not support 'RTNL_FAMILY_IP6MR' and
* 'RTNL_FAMILY_IPMR' and should ignore them.
*/
return NOTIFY_DONE;
fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
if (!fib_event)
goto err_fib_event_alloc;
fib_event->data = data;
fib_event->event = event;
fib_event->family = info->family;
switch (info->family) {
case AF_INET:
err = nsim_fib4_prepare_event(info, fib_event, event);
break;
case AF_INET6:
err = nsim_fib6_prepare_event(info, fib_event, event);
break;
}
if (err)
goto err_fib_prepare_event;
/* Enqueue the event and trigger the work */
spin_lock_bh(&data->fib_event_queue_lock);
list_add_tail(&fib_event->list, &data->fib_event_queue);
spin_unlock_bh(&data->fib_event_queue_lock);
schedule_work(&data->fib_event_work);
return NOTIFY_DONE;
err_fib_prepare_event:
kfree(fib_event);
err_fib_event_alloc:
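	/* If a deletion event could not be queued, flush all routes so we do
	 * not keep state for a route the kernel has already removed.
	 */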
if (event == FIB_EVENT_ENTRY_DEL)
schedule_work(&data->fib_flush_work);
return NOTIFY_BAD;
}
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
struct fib_notifier_info *info = ptr;
int err;
switch (event) {
case FIB_EVENT_RULE_ADD:
case FIB_EVENT_RULE_DEL:
err = nsim_fib_rule_event(data, info,
event == FIB_EVENT_RULE_ADD);
return notifier_from_errno(err);
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
return nsim_fib_event_schedule_work(data, info, event);
}
return NOTIFY_DONE;
}
static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
struct nsim_fib_data *data)
{
struct devlink *devlink = data->devlink;
struct nsim_fib4_rt *fib4_rt;
fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
nsim_fib_account(&data->ipv4.fib, false);
nsim_fib4_rt_destroy(fib4_rt);
}
static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
struct nsim_fib_data *data)
{
struct nsim_fib6_rt *fib6_rt;
fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
nsim_fib6_rt_hw_flags_set(data, fib6_rt, false);
nsim_fib_account(&data->ipv6.fib, false);
nsim_fib6_rt_destroy(fib6_rt);
}
static void nsim_fib_rt_free(void *ptr, void *arg)
{
struct nsim_fib_rt *fib_rt = ptr;
struct nsim_fib_data *data = arg;
switch (fib_rt->key.family) {
case AF_INET:
nsim_fib4_rt_free(fib_rt, data);
break;
case AF_INET6:
nsim_fib6_rt_free(fib_rt, data);
break;
default:
WARN_ON_ONCE(1);
}
}
/* Inconsistent dump reported; flush our state so it can be retried from scratch. */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
fib_nb);
struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
/* Flush the work to make sure there is no race with notifications. */
flush_work(&data->fib_event_work);
/* The notifier block is still not registered, so we do not need to
* take any locks here.
*/
list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
nsim_fib_rt_ht_params);
nsim_fib_rt_free(fib_rt, data);
}
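	/* The dump will be replayed from scratch, so reset the rule counters
	 * as well.
	 */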
atomic64_set(&data->ipv4.rules.num, 0ULL);
atomic64_set(&data->ipv6.rules.num, 0ULL);
}
static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
struct nh_notifier_info *info)
{
struct nsim_nexthop *nexthop;
u64 occ = 0;
int i;
nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL);
if (!nexthop)
return ERR_PTR(-ENOMEM);
nexthop->id = info->id;
/* Determine the number of nexthop entries the new nexthop will
* occupy.
*/
switch (info->type) {
case NH_NOTIFIER_INFO_TYPE_SINGLE:
occ = 1;
break;
case NH_NOTIFIER_INFO_TYPE_GRP:
for (i = 0; i < info->nh_grp->num_nh; i++)
occ += info->nh_grp->nh_entries[i].weight;
break;
case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
occ = info->nh_res_table->num_nh_buckets;
nexthop->is_resilient = true;
break;
default:
NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
kfree(nexthop);
return ERR_PTR(-EOPNOTSUPP);
}
nexthop->occ = occ;
return nexthop;
}
static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop)
{
kfree(nexthop);
}
static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ,
bool add, struct netlink_ext_ack *extack)
{
int i, err = 0;
if (add) {
for (i = 0; i < occ; i++)
if (!atomic64_add_unless(&data->nexthops.num, 1,
data->nexthops.max)) {
err = -ENOSPC;
NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops");
goto err_num_decrease;
}
} else {
if (WARN_ON(occ > atomic64_read(&data->nexthops.num)))
return -EINVAL;
atomic64_sub(occ, &data->nexthops.num);
}
return err;
err_num_decrease:
atomic64_sub(i, &data->nexthops.num);
return err;
}
static void nsim_nexthop_hw_flags_set(struct net *net,
const struct nsim_nexthop *nexthop,
bool trap)
{
int i;
nexthop_set_hw_flags(net, nexthop->id, false, trap);
if (!nexthop->is_resilient)
return;
for (i = 0; i < nexthop->occ; i++)
nexthop_bucket_set_hw_flags(net, nexthop->id, i, false, trap);
}
static int nsim_nexthop_add(struct nsim_fib_data *data,
struct nsim_nexthop *nexthop,
struct netlink_ext_ack *extack)
{
struct net *net = devlink_net(data->devlink);
int err;
err = nsim_nexthop_account(data, nexthop->occ, true, extack);
if (err)
return err;
err = rhashtable_insert_fast(&data->nexthop_ht, &nexthop->ht_node,
nsim_nexthop_ht_params);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to insert nexthop");
goto err_nexthop_dismiss;
}
nsim_nexthop_hw_flags_set(net, nexthop, true);
return 0;
err_nexthop_dismiss:
nsim_nexthop_account(data, nexthop->occ, false, extack);
return err;
}
static int nsim_nexthop_replace(struct nsim_fib_data *data,
struct nsim_nexthop *nexthop,
struct nsim_nexthop *nexthop_old,
struct netlink_ext_ack *extack)
{
struct net *net = devlink_net(data->devlink);
int err;
err = nsim_nexthop_account(data, nexthop->occ, true, extack);
if (err)
return err;
err = rhashtable_replace_fast(&data->nexthop_ht,
&nexthop_old->ht_node, &nexthop->ht_node,
nsim_nexthop_ht_params);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to replace nexthop");
goto err_nexthop_dismiss;
}
nsim_nexthop_hw_flags_set(net, nexthop, true);
nsim_nexthop_account(data, nexthop_old->occ, false, extack);
nsim_nexthop_destroy(nexthop_old);
return 0;
err_nexthop_dismiss:
nsim_nexthop_account(data, nexthop->occ, false, extack);
return err;
}
static int nsim_nexthop_insert(struct nsim_fib_data *data,
struct nh_notifier_info *info)
{
struct nsim_nexthop *nexthop, *nexthop_old;
int err;
nexthop = nsim_nexthop_create(data, info);
if (IS_ERR(nexthop))
return PTR_ERR(nexthop);
nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
nsim_nexthop_ht_params);
if (!nexthop_old)
err = nsim_nexthop_add(data, nexthop, info->extack);
else
err = nsim_nexthop_replace(data, nexthop, nexthop_old,
info->extack);
if (err)
nsim_nexthop_destroy(nexthop);
return err;
}
static void nsim_nexthop_remove(struct nsim_fib_data *data,
struct nh_notifier_info *info)
{
struct nsim_nexthop *nexthop;
nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
nsim_nexthop_ht_params);
if (!nexthop)
return;
rhashtable_remove_fast(&data->nexthop_ht, &nexthop->ht_node,
nsim_nexthop_ht_params);
nsim_nexthop_account(data, nexthop->occ, false, info->extack);
nsim_nexthop_destroy(nexthop);
}
static int nsim_nexthop_res_table_pre_replace(struct nsim_fib_data *data,
struct nh_notifier_info *info)
{
if (data->fail_res_nexthop_group_replace) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace a resilient nexthop group");
return -EINVAL;
}
return 0;
}
static int nsim_nexthop_bucket_replace(struct nsim_fib_data *data,
struct nh_notifier_info *info)
{
if (data->fail_nexthop_bucket_replace) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to replace nexthop bucket");
return -EINVAL;
}
nexthop_bucket_set_hw_flags(info->net, info->id,
info->nh_res_bucket->bucket_index,
false, true);
return 0;
}
static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
nexthop_nb);
struct nh_notifier_info *info = ptr;
int err = 0;
mutex_lock(&data->nh_lock);
switch (event) {
case NEXTHOP_EVENT_REPLACE:
err = nsim_nexthop_insert(data, info);
break;
case NEXTHOP_EVENT_DEL:
nsim_nexthop_remove(data, info);
break;
case NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE:
err = nsim_nexthop_res_table_pre_replace(data, info);
break;
case NEXTHOP_EVENT_BUCKET_REPLACE:
err = nsim_nexthop_bucket_replace(data, info);
break;
default:
break;
}
mutex_unlock(&data->nh_lock);
return notifier_from_errno(err);
}
static void nsim_nexthop_free(void *ptr, void *arg)
{
struct nsim_nexthop *nexthop = ptr;
struct nsim_fib_data *data = arg;
struct net *net;
net = devlink_net(data->devlink);
nsim_nexthop_hw_flags_set(net, nexthop, false);
nsim_nexthop_account(data, nexthop->occ, false, NULL);
nsim_nexthop_destroy(nexthop);
}
static ssize_t nsim_nexthop_bucket_activity_write(struct file *file,
const char __user *user_buf,
size_t size, loff_t *ppos)
{
struct nsim_fib_data *data = file->private_data;
struct net *net = devlink_net(data->devlink);
struct nsim_nexthop *nexthop;
unsigned long *activity;
loff_t pos = *ppos;
u16 bucket_index;
char buf[128];
int err = 0;
u32 nhid;
if (pos != 0)
return -EINVAL;
if (size > sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, size))
return -EFAULT;
if (sscanf(buf, "%u %hu", &nhid, &bucket_index) != 2)
return -EINVAL;
rtnl_lock();
nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &nhid,
nsim_nexthop_ht_params);
if (!nexthop || !nexthop->is_resilient ||
bucket_index >= nexthop->occ) {
err = -EINVAL;
goto out;
}
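	/* Build an activity bitmap with only the requested bucket marked
	 * active and report it to the nexthop code.
	 */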
activity = bitmap_zalloc(nexthop->occ, GFP_KERNEL);
if (!activity) {
err = -ENOMEM;
goto out;
}
bitmap_set(activity, bucket_index, 1);
nexthop_res_grp_activity_update(net, nhid, nexthop->occ, activity);
bitmap_free(activity);
out:
rtnl_unlock();
*ppos = size;
return err ?: size;
}
static const struct file_operations nsim_nexthop_bucket_activity_fops = {
.open = simple_open,
.write = nsim_nexthop_bucket_activity_write,
.llseek = no_llseek,
.owner = THIS_MODULE,
};
static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false);
}
static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false);
}
static u64 nsim_fib_ipv6_resource_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false);
}
static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
static u64 nsim_fib_nexthops_res_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
return nsim_fib_get_val(data, NSIM_RESOURCE_NEXTHOPS, false);
}
static void nsim_fib_set_max_all(struct nsim_fib_data *data,
struct devlink *devlink)
{
static const enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES,
NSIM_RESOURCE_NEXTHOPS,
};
int i;
for (i = 0; i < ARRAY_SIZE(res_ids); i++) {
int err;
u64 val;
err = devl_resource_size_get(devlink, res_ids[i], &val);
if (err)
val = (u64) -1;
nsim_fib_set_max(data, res_ids[i], val);
}
}
static void nsim_fib_event_work(struct work_struct *work)
{
struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
fib_event_work);
struct nsim_fib_event *fib_event, *next_fib_event;
LIST_HEAD(fib_event_queue);
spin_lock_bh(&data->fib_event_queue_lock);
list_splice_init(&data->fib_event_queue, &fib_event_queue);
spin_unlock_bh(&data->fib_event_queue_lock);
mutex_lock(&data->fib_lock);
list_for_each_entry_safe(fib_event, next_fib_event, &fib_event_queue,
list) {
nsim_fib_event(fib_event);
list_del(&fib_event->list);
kfree(fib_event);
cond_resched();
}
mutex_unlock(&data->fib_lock);
}
static void nsim_fib_flush_work(struct work_struct *work)
{
struct nsim_fib_data *data = container_of(work, struct nsim_fib_data,
fib_flush_work);
struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
/* Process pending work. */
flush_work(&data->fib_event_work);
mutex_lock(&data->fib_lock);
list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
nsim_fib_rt_ht_params);
nsim_fib_rt_free(fib_rt, data);
}
mutex_unlock(&data->fib_lock);
}
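/* Create the per-device "fib" debugfs directory with the failure-injection
 * knobs and the nexthop_bucket_activity file.
 */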
static int
nsim_fib_debugfs_init(struct nsim_fib_data *data, struct nsim_dev *nsim_dev)
{
data->ddir = debugfs_create_dir("fib", nsim_dev->ddir);
if (IS_ERR(data->ddir))
return PTR_ERR(data->ddir);
data->fail_route_offload = false;
debugfs_create_bool("fail_route_offload", 0600, data->ddir,
&data->fail_route_offload);
data->fail_res_nexthop_group_replace = false;
debugfs_create_bool("fail_res_nexthop_group_replace", 0600, data->ddir,
&data->fail_res_nexthop_group_replace);
data->fail_nexthop_bucket_replace = false;
debugfs_create_bool("fail_nexthop_bucket_replace", 0600, data->ddir,
&data->fail_nexthop_bucket_replace);
debugfs_create_file("nexthop_bucket_activity", 0200, data->ddir,
data, &nsim_nexthop_bucket_activity_fops);
data->fail_route_delete = false;
debugfs_create_bool("fail_route_delete", 0600, data->ddir,
&data->fail_route_delete);
return 0;
}
static void nsim_fib_debugfs_exit(struct nsim_fib_data *data)
{
debugfs_remove_recursive(data->ddir);
}
struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct nsim_fib_data *data;
struct nsim_dev *nsim_dev;
int err;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
data->devlink = devlink;
nsim_dev = devlink_priv(devlink);
err = nsim_fib_debugfs_init(data, nsim_dev);
if (err)
goto err_data_free;
mutex_init(&data->nh_lock);
err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
if (err)
goto err_debugfs_exit;
mutex_init(&data->fib_lock);
INIT_LIST_HEAD(&data->fib_rt_list);
err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
if (err)
goto err_rhashtable_nexthop_destroy;
INIT_WORK(&data->fib_event_work, nsim_fib_event_work);
INIT_WORK(&data->fib_flush_work, nsim_fib_flush_work);
INIT_LIST_HEAD(&data->fib_event_queue);
spin_lock_init(&data->fib_event_queue_lock);
nsim_fib_set_max_all(data, devlink);
data->nexthop_nb.notifier_call = nsim_nexthop_event_nb;
err = register_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb,
extack);
if (err) {
pr_err("Failed to register nexthop notifier\n");
goto err_rhashtable_fib_destroy;
}
data->fib_nb.notifier_call = nsim_fib_event_nb;
err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
goto err_nexthop_nb_unregister;
}
devl_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB,
nsim_fib_ipv4_resource_occ_get,
data);
devl_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV4_FIB_RULES,
nsim_fib_ipv4_rules_res_occ_get,
data);
devl_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB,
nsim_fib_ipv6_resource_occ_get,
data);
devl_resource_occ_get_register(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES,
nsim_fib_ipv6_rules_res_occ_get,
data);
devl_resource_occ_get_register(devlink,
NSIM_RESOURCE_NEXTHOPS,
nsim_fib_nexthops_res_occ_get,
data);
return data;
err_nexthop_nb_unregister:
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
err_rhashtable_fib_destroy:
cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
err_rhashtable_nexthop_destroy:
rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
data);
mutex_destroy(&data->fib_lock);
err_debugfs_exit:
mutex_destroy(&data->nh_lock);
nsim_fib_debugfs_exit(data);
err_data_free:
kfree(data);
return ERR_PTR(err);
}
void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
{
devl_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_NEXTHOPS);
devl_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES);
devl_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB);
devl_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB_RULES);
devl_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
cancel_work_sync(&data->fib_flush_work);
flush_work(&data->fib_event_work);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
data);
WARN_ON_ONCE(!list_empty(&data->fib_event_queue));
WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
mutex_destroy(&data->fib_lock);
mutex_destroy(&data->nh_lock);
nsim_fib_debugfs_exit(data);
kfree(data);
}
| linux-master | drivers/net/netdevsim/fib.c |

/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree.
*
* THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
* WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include "netdevsim.h"
#define pr_vlog(env, fmt, ...) \
bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
struct nsim_bpf_bound_prog {
struct nsim_dev *nsim_dev;
struct bpf_prog *prog;
struct dentry *ddir;
const char *state;
bool is_loaded;
struct list_head l;
};
#define NSIM_BPF_MAX_KEYS 2
struct nsim_bpf_bound_map {
struct netdevsim *ns;
struct bpf_offloaded_map *map;
struct mutex mutex;
struct nsim_map_entry {
void *key;
void *value;
} entry[NSIM_BPF_MAX_KEYS];
struct list_head l;
};
static int nsim_bpf_string_show(struct seq_file *file, void *data)
{
const char **str = file->private;
if (*str)
seq_printf(file, "%s\n", *str);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);
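/* Per-instruction verifier callback: sleeps at the first instruction when
 * bpf_bind_verifier_delay is set and, at the last instruction, rejects the
 * program unless bpf_bind_verifier_accept is true.
 */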
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
struct nsim_bpf_bound_prog *state;
int ret = 0;
state = env->prog->aux->offload->dev_priv;
if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
msleep(state->nsim_dev->bpf_bind_verifier_delay);
if (insn_idx == env->prog->len - 1) {
pr_vlog(env, "Hello from netdevsim!\n");
if (!state->nsim_dev->bpf_bind_verifier_accept)
ret = -EOPNOTSUPP;
}
return ret;
}
static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
return 0;
}
static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
return ns->xdp_hw.prog;
}
static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
struct nsim_bpf_bound_prog *state;
if (!prog || !prog->aux->offload)
return;
state = prog->aux->offload->dev_priv;
state->is_loaded = loaded;
}
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
nsim_prog_set_loaded(ns->bpf_offloaded, false);
WARN(!!ns->bpf_offloaded != oldprog,
"bad offload state, expected offload %sto be active",
oldprog ? "" : "not ");
ns->bpf_offloaded = prog;
ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
nsim_prog_set_loaded(prog, true);
return 0;
}
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct tc_cls_bpf_offload *cls_bpf = type_data;
struct bpf_prog *prog = cls_bpf->prog;
struct netdevsim *ns = cb_priv;
struct bpf_prog *oldprog;
if (type != TC_SETUP_CLSBPF) {
NSIM_EA(cls_bpf->common.extack,
"only offload of BPF classifiers supported");
return -EOPNOTSUPP;
}
if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
return -EOPNOTSUPP;
if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
NSIM_EA(cls_bpf->common.extack,
"only ETH_P_ALL supported as filter protocol");
return -EOPNOTSUPP;
}
if (!ns->bpf_tc_accept) {
NSIM_EA(cls_bpf->common.extack,
"netdevsim configured to reject BPF TC offload");
return -EOPNOTSUPP;
}
/* Note: progs without skip_sw will probably not be dev bound */
if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
NSIM_EA(cls_bpf->common.extack,
"netdevsim configured to reject unbound programs");
return -EOPNOTSUPP;
}
if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
return -EOPNOTSUPP;
oldprog = cls_bpf->oldprog;
/* Don't remove if oldprog doesn't match driver's state */
if (ns->bpf_offloaded != oldprog) {
oldprog = NULL;
if (!cls_bpf->prog)
return 0;
if (ns->bpf_offloaded) {
NSIM_EA(cls_bpf->common.extack,
"driver and netdev offload states mismatch");
return -EBUSY;
}
}
return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}
int nsim_bpf_disable_tc(struct netdevsim *ns)
{
if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
return -EBUSY;
return 0;
}
static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
if (!nsim_xdp_offload_active(ns) && !bpf->prog)
return 0;
if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
NSIM_EA(bpf->extack, "TC program is already loaded");
return -EBUSY;
}
return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
struct xdp_attachment_info *xdp)
{
int err;
if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
return -EOPNOTSUPP;
}
if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
return -EOPNOTSUPP;
}
if (bpf->command == XDP_SETUP_PROG_HW) {
err = nsim_xdp_offload_prog(ns, bpf);
if (err)
return err;
}
xdp_attachment_setup(xdp, bpf);
return 0;
}
static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
char name[16];
int ret;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->nsim_dev = nsim_dev;
state->prog = prog;
state->state = "verify";
/* Program id is not populated yet when we create the state. */
sprintf(name, "%u", nsim_dev->prog_id_gen++);
state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
if (IS_ERR(state->ddir)) {
ret = PTR_ERR(state->ddir);
kfree(state);
return ret;
}
debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
debugfs_create_file("state", 0400, state->ddir,
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
prog->aux->offload->dev_priv = state;
return 0;
}
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
struct nsim_dev *nsim_dev =
bpf_offload_dev_priv(prog->aux->offload->offdev);
if (!nsim_dev->bpf_bind_accept)
return -EOPNOTSUPP;
return nsim_bpf_create_prog(nsim_dev, prog);
}
static int nsim_bpf_translate(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
state->state = "xlated";
return 0;
}
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
state = prog->aux->offload->dev_priv;
WARN(state->is_loaded,
"offload state destroyed while program still bound");
debugfs_remove_recursive(state->ddir);
list_del(&state->l);
kfree(state);
}
static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
.insn_hook = nsim_bpf_verify_insn,
.finalize = nsim_bpf_finalize,
.prepare = nsim_bpf_verifier_prep,
.translate = nsim_bpf_translate,
.destroy = nsim_bpf_destroy_prog,
};
static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
if (bpf->prog && bpf->prog->aux->offload) {
NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
return -EINVAL;
}
if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
return -EINVAL;
}
return 0;
}
static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
struct nsim_bpf_bound_prog *state;
if (!bpf->prog)
return 0;
if (!bpf->prog->aux->offload) {
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
state = bpf->prog->aux->offload->dev_priv;
if (WARN_ON(strcmp(state->state, "xlated"))) {
NSIM_EA(bpf->extack, "offloading program in bad state");
return -EINVAL;
}
return 0;
}
static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
return e->key && !memcmp(key, e->key, map->key_size);
}
static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
return i;
return -ENOENT;
}
static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
nmap->entry[idx].key = kmalloc(offmap->map.key_size,
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].key)
return -ENOMEM;
nmap->entry[idx].value = kmalloc(offmap->map.value_size,
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!nmap->entry[idx].value) {
kfree(nmap->entry[idx].key);
nmap->entry[idx].key = NULL;
return -ENOMEM;
}
return 0;
}
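/* Copy the key following @key into @next_key; when @key is NULL or not found,
 * start from the first occupied entry. Returns -ENOENT once the end of the
 * map has been reached.
 */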
static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx = -ENOENT;
mutex_lock(&nmap->mutex);
if (key)
idx = nsim_map_key_find(offmap, key);
if (idx == -ENOENT)
idx = 0;
else
idx++;
for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
if (nmap->entry[idx].key) {
memcpy(next_key, nmap->entry[idx].key,
offmap->map.key_size);
break;
}
}
mutex_unlock(&nmap->mutex);
if (idx == ARRAY_SIZE(nmap->entry))
return -ENOENT;
return 0;
}
static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx;
mutex_lock(&nmap->mutex);
idx = nsim_map_key_find(offmap, key);
if (idx >= 0)
memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
mutex_unlock(&nmap->mutex);
return idx < 0 ? idx : 0;
}
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx, err = 0;
mutex_lock(&nmap->mutex);
idx = nsim_map_key_find(offmap, key);
if (idx < 0 && flags == BPF_EXIST) {
err = idx;
goto exit_unlock;
}
if (idx >= 0 && flags == BPF_NOEXIST) {
err = -EEXIST;
goto exit_unlock;
}
if (idx < 0) {
for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
if (!nmap->entry[idx].key)
break;
if (idx == ARRAY_SIZE(nmap->entry)) {
err = -E2BIG;
goto exit_unlock;
}
err = nsim_map_alloc_elem(offmap, idx);
if (err)
goto exit_unlock;
}
memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
mutex_unlock(&nmap->mutex);
return err;
}
static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
int idx;
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
return -EINVAL;
mutex_lock(&nmap->mutex);
idx = nsim_map_key_find(offmap, key);
if (idx >= 0) {
kfree(nmap->entry[idx].key);
kfree(nmap->entry[idx].value);
memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
}
mutex_unlock(&nmap->mutex);
return idx < 0 ? idx : 0;
}
static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
.map_get_next_key = nsim_map_get_next_key,
.map_lookup_elem = nsim_map_lookup_elem,
.map_update_elem = nsim_map_update_elem,
.map_delete_elem = nsim_map_delete_elem,
};
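/* Bind an offloaded map to the simulated device. Only array and hash maps
 * with at most NSIM_BPF_MAX_KEYS entries and no flags are accepted; array
 * maps are pre-populated with their index as the key and zeroed values.
 */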
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
struct nsim_bpf_bound_map *nmap;
int i, err;
if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
offmap->map.map_type != BPF_MAP_TYPE_HASH))
return -EINVAL;
if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
return -ENOMEM;
if (offmap->map.map_flags)
return -EINVAL;
nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
if (!nmap)
return -ENOMEM;
offmap->dev_priv = nmap;
nmap->ns = ns;
nmap->map = offmap;
mutex_init(&nmap->mutex);
if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
u32 *key;
err = nsim_map_alloc_elem(offmap, i);
if (err)
goto err_free;
key = nmap->entry[i].key;
*key = i;
memset(nmap->entry[i].value, 0, offmap->map.value_size);
}
}
offmap->dev_ops = &nsim_bpf_map_ops;
list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);
return 0;
err_free:
while (--i >= 0) {
kfree(nmap->entry[i].key);
kfree(nmap->entry[i].value);
}
kfree(nmap);
return err;
}
static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
kfree(nmap->entry[i].key);
kfree(nmap->entry[i].value);
}
list_del_init(&nmap->l);
mutex_destroy(&nmap->mutex);
kfree(nmap);
}
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct netdevsim *ns = netdev_priv(dev);
int err;
ASSERT_RTNL();
switch (bpf->command) {
case XDP_SETUP_PROG:
err = nsim_setup_prog_checks(ns, bpf);
if (err)
return err;
return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
case XDP_SETUP_PROG_HW:
err = nsim_setup_prog_hw_checks(ns, bpf);
if (err)
return err;
return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
case BPF_OFFLOAD_MAP_ALLOC:
if (!ns->bpf_map_accept)
return -EOPNOTSUPP;
return nsim_bpf_map_alloc(ns, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
nsim_bpf_map_free(bpf->offmap);
return 0;
default:
return -EINVAL;
}
}
int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
int err;
INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);
nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
nsim_dev->ddir);
if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
if (err)
return err;
nsim_dev->bpf_bind_accept = true;
debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_accept);
debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_delay);
nsim_dev->bpf_bind_verifier_accept = true;
debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
&nsim_dev->bpf_bind_verifier_accept);
return 0;
}
void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
{
WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
bpf_offload_dev_destroy(nsim_dev->bpf_dev);
}
int nsim_bpf_init(struct netdevsim *ns)
{
struct dentry *ddir = ns->nsim_dev_port->ddir;
int err;
err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
ns->netdev);
if (err)
return err;
debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
&ns->bpf_offloaded_id);
ns->bpf_tc_accept = true;
debugfs_create_bool("bpf_tc_accept", 0600, ddir,
&ns->bpf_tc_accept);
debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
&ns->bpf_tc_non_bound_accept);
ns->bpf_xdpdrv_accept = true;
debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
&ns->bpf_xdpdrv_accept);
ns->bpf_xdpoffload_accept = true;
debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
&ns->bpf_xdpoffload_accept);
ns->bpf_map_accept = true;
debugfs_create_bool("bpf_map_accept", 0600, ddir,
&ns->bpf_map_accept);
return 0;
}
void nsim_bpf_uninit(struct netdevsim *ns)
{
WARN_ON(ns->xdp.prog);
WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}
| linux-master | drivers/net/netdevsim/bpf.c |
// SPDX-License-Identifier: GPL-2.0
#include <net/macsec.h>
#include "netdevsim.h"
static inline u64 sci_to_cpu(sci_t sci)
{
return be64_to_cpu((__force __be64)sci);
}
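/* Return the index of the SecY with the given SCI, or -1 if it is not in the
 * table.
 */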
static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci)
{
int i;
for (i = 0; i < NSIM_MACSEC_MAX_SECY_COUNT; i++) {
if (ns->macsec.nsim_secy[i].sci == sci)
return i;
}
return -1;
}
static int nsim_macsec_find_rxsc(struct nsim_secy *ns_secy, sci_t sci)
{
int i;
for (i = 0; i < NSIM_MACSEC_MAX_RXSC_COUNT; i++) {
if (ns_secy->nsim_rxsc[i].sci == sci)
return i;
}
return -1;
}
static int nsim_macsec_add_secy(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
if (ns->macsec.nsim_secy_count == NSIM_MACSEC_MAX_SECY_COUNT)
return -ENOSPC;
for (idx = 0; idx < NSIM_MACSEC_MAX_SECY_COUNT; idx++) {
if (!ns->macsec.nsim_secy[idx].used)
break;
}
if (idx == NSIM_MACSEC_MAX_SECY_COUNT) {
netdev_err(ctx->netdev, "%s: nsim_secy_count not full but all SecYs used\n",
__func__);
return -ENOSPC;
}
netdev_dbg(ctx->netdev, "%s: adding new secy with sci %08llx at index %d\n",
__func__, sci_to_cpu(ctx->secy->sci), idx);
ns->macsec.nsim_secy[idx].used = true;
ns->macsec.nsim_secy[idx].nsim_rxsc_count = 0;
ns->macsec.nsim_secy[idx].sci = ctx->secy->sci;
ns->macsec.nsim_secy_count++;
return 0;
}
static int nsim_macsec_upd_secy(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: updating secy with sci %08llx at index %d\n",
__func__, sci_to_cpu(ctx->secy->sci), idx);
return 0;
}
static int nsim_macsec_del_secy(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: removing SecY with SCI %08llx at index %d\n",
__func__, sci_to_cpu(ctx->secy->sci), idx);
ns->macsec.nsim_secy[idx].used = false;
memset(&ns->macsec.nsim_secy[idx], 0, sizeof(ns->macsec.nsim_secy[idx]));
ns->macsec.nsim_secy_count--;
return 0;
}
static int nsim_macsec_add_rxsc(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
if (secy->nsim_rxsc_count == NSIM_MACSEC_MAX_RXSC_COUNT)
return -ENOSPC;
for (idx = 0; idx < NSIM_MACSEC_MAX_RXSC_COUNT; idx++) {
if (!secy->nsim_rxsc[idx].used)
break;
}
	if (idx == NSIM_MACSEC_MAX_RXSC_COUNT) {
		netdev_err(ctx->netdev, "%s: nsim_rxsc_count not full but all RXSCs used\n",
			   __func__);
		return -ENOSPC;
	}
netdev_dbg(ctx->netdev, "%s: adding new rxsc with sci %08llx at index %d\n",
__func__, sci_to_cpu(ctx->rx_sc->sci), idx);
secy->nsim_rxsc[idx].used = true;
secy->nsim_rxsc[idx].sci = ctx->rx_sc->sci;
secy->nsim_rxsc_count++;
return 0;
}
static int nsim_macsec_upd_rxsc(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
__func__, sci_to_cpu(ctx->rx_sc->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: updating RXSC with sci %08llx at index %d\n",
__func__, sci_to_cpu(ctx->rx_sc->sci), idx);
return 0;
}
static int nsim_macsec_del_rxsc(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
idx = nsim_macsec_find_rxsc(secy, ctx->rx_sc->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
__func__, sci_to_cpu(ctx->rx_sc->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: removing RXSC with sci %08llx at index %d\n",
__func__, sci_to_cpu(ctx->rx_sc->sci), idx);
secy->nsim_rxsc[idx].used = false;
memset(&secy->nsim_rxsc[idx], 0, sizeof(secy->nsim_rxsc[idx]));
secy->nsim_rxsc_count--;
return 0;
}
static int nsim_macsec_add_rxsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
return 0;
}
static int nsim_macsec_upd_rxsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
return 0;
}
static int nsim_macsec_del_rxsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
struct nsim_secy *secy;
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
secy = &ns->macsec.nsim_secy[idx];
idx = nsim_macsec_find_rxsc(secy, ctx->sa.rx_sa->sc->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in RXSC table\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: RXSC with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->sa.rx_sa->sc->sci), ctx->sa.assoc_num);
return 0;
}
static int nsim_macsec_add_txsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
return 0;
}
static int nsim_macsec_upd_txsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
return 0;
}
static int nsim_macsec_del_txsa(struct macsec_context *ctx)
{
struct netdevsim *ns = netdev_priv(ctx->netdev);
int idx;
idx = nsim_macsec_find_secy(ns, ctx->secy->sci);
if (idx < 0) {
netdev_err(ctx->netdev, "%s: sci %08llx not found in secy table\n",
__func__, sci_to_cpu(ctx->secy->sci));
return -ENOENT;
}
netdev_dbg(ctx->netdev, "%s: SECY with sci %08llx, AN %u\n",
__func__, sci_to_cpu(ctx->secy->sci), ctx->sa.assoc_num);
return 0;
}
static const struct macsec_ops nsim_macsec_ops = {
.mdo_add_secy = nsim_macsec_add_secy,
.mdo_upd_secy = nsim_macsec_upd_secy,
.mdo_del_secy = nsim_macsec_del_secy,
.mdo_add_rxsc = nsim_macsec_add_rxsc,
.mdo_upd_rxsc = nsim_macsec_upd_rxsc,
.mdo_del_rxsc = nsim_macsec_del_rxsc,
.mdo_add_rxsa = nsim_macsec_add_rxsa,
.mdo_upd_rxsa = nsim_macsec_upd_rxsa,
.mdo_del_rxsa = nsim_macsec_del_rxsa,
.mdo_add_txsa = nsim_macsec_add_txsa,
.mdo_upd_txsa = nsim_macsec_upd_txsa,
.mdo_del_txsa = nsim_macsec_del_txsa,
};
void nsim_macsec_init(struct netdevsim *ns)
{
ns->netdev->macsec_ops = &nsim_macsec_ops;
ns->netdev->features |= NETIF_F_HW_MACSEC;
memset(&ns->macsec, 0, sizeof(ns->macsec));
}
void nsim_macsec_teardown(struct netdevsim *ns)
{
}
| linux-master | drivers/net/netdevsim/macsec.c |
/*
* Copyright (c) 2018 Cumulus Networks. All rights reserved.
* Copyright (c) 2018 David Ahern <[email protected]>
* Copyright (c) 2019 Mellanox Technologies. All rights reserved.
*
* This software is licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree.
*
* THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
* WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/inet.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <net/devlink.h>
#include <net/ip.h>
#include <net/flow_offload.h>
#include <uapi/linux/devlink.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/udp.h>
#include "netdevsim.h"
static unsigned int
nsim_dev_port_index(enum nsim_dev_port_type type, unsigned int port_index)
{
switch (type) {
case NSIM_DEV_PORT_TYPE_VF:
port_index = NSIM_DEV_VF_PORT_INDEX_BASE + port_index;
break;
case NSIM_DEV_PORT_TYPE_PF:
break;
}
return port_index;
}
static inline unsigned int nsim_dev_port_index_to_vf_index(unsigned int port_index)
{
return port_index - NSIM_DEV_VF_PORT_INDEX_BASE;
}
static struct dentry *nsim_dev_ddir;
unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
{
WARN_ON(!lockdep_rtnl_is_held() &&
!devl_lock_is_held(priv_to_devlink(nsim_dev)));
return nsim_dev->nsim_bus_dev->num_vfs;
}
static void
nsim_bus_dev_set_vfs(struct nsim_bus_dev *nsim_bus_dev, unsigned int num_vfs)
{
rtnl_lock();
nsim_bus_dev->num_vfs = num_vfs;
rtnl_unlock();
}
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
static int
nsim_dev_take_snapshot(struct devlink *devlink,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
void *dummy_data;
dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
if (!dummy_data)
return -ENOMEM;
get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
*data = dummy_data;
return 0;
}
static ssize_t nsim_dev_take_snapshot_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
struct devlink *devlink;
u8 *dummy_data;
int err;
u32 id;
devlink = priv_to_devlink(nsim_dev);
err = nsim_dev_take_snapshot(devlink, NULL, NULL, &dummy_data);
if (err)
return err;
err = devlink_region_snapshot_id_get(devlink, &id);
if (err) {
pr_err("Failed to get snapshot id\n");
kfree(dummy_data);
return err;
}
err = devlink_region_snapshot_create(nsim_dev->dummy_region,
dummy_data, id);
devlink_region_snapshot_id_put(devlink, id);
if (err) {
pr_err("Failed to create region snapshot\n");
kfree(dummy_data);
return err;
}
return count;
}
static const struct file_operations nsim_dev_take_snapshot_fops = {
.open = simple_open,
.write = nsim_dev_take_snapshot_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
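/* Dump the currently configured flow action cookie as a hex string. */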
static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
struct flow_action_cookie *fa_cookie;
unsigned int buf_len;
ssize_t ret;
char *buf;
spin_lock(&nsim_dev->fa_cookie_lock);
fa_cookie = nsim_dev->fa_cookie;
if (!fa_cookie) {
ret = -EINVAL;
goto errout;
}
buf_len = fa_cookie->cookie_len * 2;
buf = kmalloc(buf_len, GFP_ATOMIC);
if (!buf) {
ret = -ENOMEM;
goto errout;
}
bin2hex(buf, fa_cookie->cookie, fa_cookie->cookie_len);
spin_unlock(&nsim_dev->fa_cookie_lock);
ret = simple_read_from_buffer(data, count, ppos, buf, buf_len);
kfree(buf);
return ret;
errout:
spin_unlock(&nsim_dev->fa_cookie_lock);
return ret;
}
static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
struct flow_action_cookie *fa_cookie;
size_t cookie_len;
ssize_t ret;
char *buf;
if (*ppos != 0)
return -EINVAL;
cookie_len = (count - 1) / 2;
if ((count - 1) % 2)
return -EINVAL;
buf = memdup_user(data, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
GFP_KERNEL | __GFP_NOWARN);
if (!fa_cookie) {
ret = -ENOMEM;
goto free_buf;
}
fa_cookie->cookie_len = cookie_len;
ret = hex2bin(fa_cookie->cookie, buf, cookie_len);
if (ret)
goto free_fa_cookie;
kfree(buf);
spin_lock(&nsim_dev->fa_cookie_lock);
kfree(nsim_dev->fa_cookie);
nsim_dev->fa_cookie = fa_cookie;
spin_unlock(&nsim_dev->fa_cookie_lock);
return count;
free_fa_cookie:
kfree(fa_cookie);
free_buf:
kfree(buf);
return ret;
}
static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.open = simple_open,
.read = nsim_dev_trap_fa_cookie_read,
.write = nsim_dev_trap_fa_cookie_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
static ssize_t nsim_bus_dev_max_vfs_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_dev *nsim_dev = file->private_data;
char buf[11];
ssize_t len;
len = scnprintf(buf, sizeof(buf), "%u\n",
READ_ONCE(nsim_dev->nsim_bus_dev->max_vfs));
return simple_read_from_buffer(data, count, ppos, buf, len);
}
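/* Update max_vfs from debugfs. The new VF config array is swapped in under
 * the devlink lock; the write is rejected with -EBUSY while VFs are already
 * configured.
 */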
static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
const char __user *data,
size_t count, loff_t *ppos)
{
struct nsim_vf_config *vfconfigs;
struct nsim_dev *nsim_dev;
char buf[10];
ssize_t ret;
u32 val;
if (*ppos != 0)
return 0;
if (count >= sizeof(buf))
return -ENOSPC;
ret = copy_from_user(buf, data, count);
if (ret)
return -EFAULT;
buf[count] = '\0';
ret = kstrtouint(buf, 10, &val);
if (ret)
return -EINVAL;
/* max_vfs limited by the maximum number of provided port indexes */
if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE)
return -ERANGE;
vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config),
GFP_KERNEL | __GFP_NOWARN);
if (!vfconfigs)
return -ENOMEM;
nsim_dev = file->private_data;
devl_lock(priv_to_devlink(nsim_dev));
/* Reject if VFs are configured */
if (nsim_dev_get_vfs(nsim_dev)) {
ret = -EBUSY;
} else {
swap(nsim_dev->vfconfigs, vfconfigs);
WRITE_ONCE(nsim_dev->nsim_bus_dev->max_vfs, val);
*ppos += count;
ret = count;
}
devl_unlock(priv_to_devlink(nsim_dev));
kfree(vfconfigs);
return ret;
}
static const struct file_operations nsim_dev_max_vfs_fops = {
.open = simple_open,
.read = nsim_bus_dev_max_vfs_read,
.write = nsim_bus_dev_max_vfs_write,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[sizeof(DRV_NAME) + 10];
int err;
sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
if (IS_ERR(nsim_dev->ddir))
return PTR_ERR(nsim_dev->ddir);
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
if (IS_ERR(nsim_dev->ports_ddir)) {
err = PTR_ERR(nsim_dev->ports_ddir);
goto err_ddir;
}
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_overwrite_mask);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
&nsim_dev->test1);
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
nsim_dev,
&nsim_dev_take_snapshot_fops);
debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
&nsim_dev->dont_allow_reload);
debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
&nsim_dev->fail_reload);
debugfs_create_file("trap_flow_action_cookie", 0600, nsim_dev->ddir,
nsim_dev, &nsim_dev_trap_fa_cookie_fops);
debugfs_create_bool("fail_trap_group_set", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_group_set);
debugfs_create_bool("fail_trap_policer_set", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_set);
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
/* caution, dev_max_vfs write takes devlink lock */
debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
nsim_dev, &nsim_dev_max_vfs_fops);
nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
if (IS_ERR(nsim_dev->nodes_ddir)) {
err = PTR_ERR(nsim_dev->nodes_ddir);
goto err_ports_ddir;
}
debugfs_create_bool("fail_trap_drop_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_drop_counter_get);
nsim_udp_tunnels_debugfs_create(nsim_dev);
return 0;
err_ports_ddir:
debugfs_remove_recursive(nsim_dev->ports_ddir);
err_ddir:
debugfs_remove_recursive(nsim_dev->ddir);
return err;
}
static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
{
debugfs_remove_recursive(nsim_dev->nodes_ddir);
debugfs_remove_recursive(nsim_dev->ports_ddir);
debugfs_remove_recursive(nsim_dev->ddir);
}
static ssize_t nsim_dev_rate_parent_read(struct file *file,
char __user *data,
size_t count, loff_t *ppos)
{
char **name_ptr = file->private_data;
size_t len;
if (!*name_ptr)
return 0;
len = strlen(*name_ptr);
return simple_read_from_buffer(data, count, ppos, *name_ptr, len);
}
static const struct file_operations nsim_dev_rate_parent_fops = {
.open = simple_open,
.read = nsim_dev_rate_parent_read,
.llseek = generic_file_llseek,
.owner = THIS_MODULE,
};
static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
struct nsim_dev_port *nsim_dev_port)
{
struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
unsigned int port_index = nsim_dev_port->port_index;
char port_ddir_name[16];
char dev_link_name[32];
sprintf(port_ddir_name, "%u", port_index);
nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
nsim_dev->ports_ddir);
if (IS_ERR(nsim_dev_port->ddir))
return PTR_ERR(nsim_dev_port->ddir);
sprintf(dev_link_name, "../../../" DRV_NAME "%u", nsim_bus_dev->dev.id);
if (nsim_dev_port_is_vf(nsim_dev_port)) {
unsigned int vf_id = nsim_dev_port_index_to_vf_index(port_index);
debugfs_create_u16("tx_share", 0400, nsim_dev_port->ddir,
&nsim_dev->vfconfigs[vf_id].min_tx_rate);
debugfs_create_u16("tx_max", 0400, nsim_dev_port->ddir,
&nsim_dev->vfconfigs[vf_id].max_tx_rate);
nsim_dev_port->rate_parent = debugfs_create_file("rate_parent",
0400,
nsim_dev_port->ddir,
&nsim_dev_port->parent_name,
&nsim_dev_rate_parent_fops);
}
debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
return 0;
}
static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
{
debugfs_remove_recursive(nsim_dev_port->ddir);
}
static int nsim_dev_resources_register(struct devlink *devlink)
{
struct devlink_resource_size_params params = {
.size_max = (u64)-1,
.size_granularity = 1,
.unit = DEVLINK_RESOURCE_UNIT_ENTRY
};
int err;
/* Resources for IPv4 */
err = devl_resource_register(devlink, "IPv4", (u64)-1,
NSIM_RESOURCE_IPV4,
DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &params);
if (err) {
pr_err("Failed to register IPv4 top resource\n");
goto err_out;
}
err = devl_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV4_FIB,
				     NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB resource\n");
goto err_out;
}
err = devl_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV4_FIB_RULES,
				     NSIM_RESOURCE_IPV4, &params);
if (err) {
pr_err("Failed to register IPv4 FIB rules resource\n");
goto err_out;
}
/* Resources for IPv6 */
err = devl_resource_register(devlink, "IPv6", (u64)-1,
NSIM_RESOURCE_IPV6,
DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &params);
if (err) {
pr_err("Failed to register IPv6 top resource\n");
goto err_out;
}
err = devl_resource_register(devlink, "fib", (u64)-1,
NSIM_RESOURCE_IPV6_FIB,
				     NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB resource\n");
goto err_out;
}
err = devl_resource_register(devlink, "fib-rules", (u64)-1,
NSIM_RESOURCE_IPV6_FIB_RULES,
				     NSIM_RESOURCE_IPV6, &params);
if (err) {
pr_err("Failed to register IPv6 FIB rules resource\n");
goto err_out;
}
/* Resources for nexthops */
err = devl_resource_register(devlink, "nexthops", (u64)-1,
NSIM_RESOURCE_NEXTHOPS,
DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &params);
if (err) {
pr_err("Failed to register NEXTHOPS resource\n");
goto err_out;
}
return 0;
err_out:
devl_resources_unregister(devlink);
return err;
}
enum nsim_devlink_param_id {
NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
NSIM_DEVLINK_PARAM_ID_TEST1,
};
static const struct devlink_param nsim_devlink_params[] = {
DEVLINK_PARAM_GENERIC(MAX_MACS,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1,
"test1", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, NULL),
};
static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
union devlink_param_value value;
value.vu32 = nsim_dev->max_macs;
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
value);
value.vbool = nsim_dev->test1;
devl_param_driverinit_value_set(devlink,
NSIM_DEVLINK_PARAM_ID_TEST1,
value);
}
static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
union devlink_param_value saved_value;
int err;
err = devl_param_driverinit_value_get(devlink,
DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
&saved_value);
if (!err)
nsim_dev->max_macs = saved_value.vu32;
err = devl_param_driverinit_value_get(devlink,
NSIM_DEVLINK_PARAM_ID_TEST1,
&saved_value);
if (!err)
nsim_dev->test1 = saved_value.vbool;
}
#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
static const struct devlink_region_ops dummy_region_ops = {
.name = "dummy",
.destructor = &kfree,
.snapshot = nsim_dev_take_snapshot,
};
static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
struct devlink *devlink)
{
nsim_dev->dummy_region =
devl_region_create(devlink, &dummy_region_ops,
NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
NSIM_DEV_DUMMY_REGION_SIZE);
return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
}
static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
{
devl_region_destroy(nsim_dev->dummy_region);
}
static int
__nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index);
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_dev_port *nsim_dev_port, *tmp;
devl_rate_nodes_destroy(devlink);
list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
if (nsim_dev_port_is_vf(nsim_dev_port))
__nsim_dev_port_del(nsim_dev_port);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
}
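/* Switch to switchdev mode: create a VF port for each currently configured
 * VF, tearing the VF ports down again on failure.
 */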
static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
int i, err;
for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_VF, i);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
goto err_port_add_vfs;
}
}
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
return 0;
err_port_add_vfs:
list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
if (nsim_dev_port_is_vf(nsim_dev_port))
__nsim_dev_port_del(nsim_dev_port);
return err;
}
static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
if (mode == nsim_dev->esw_mode)
return 0;
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
return nsim_esw_legacy_enable(nsim_dev, extack);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
return nsim_esw_switchdev_enable(nsim_dev, extack);
return -EINVAL;
}
static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
*mode = nsim_dev->esw_mode;
return 0;
}
struct nsim_trap_item {
void *trap_ctx;
enum devlink_trap_action action;
};
struct nsim_trap_data {
struct delayed_work trap_report_dw;
struct nsim_trap_item *trap_items_arr;
u64 *trap_policers_cnt_arr;
u64 trap_pkt_cnt;
struct nsim_dev *nsim_dev;
spinlock_t trap_lock; /* Protects trap_items_arr */
};
/* All driver-specific traps must be documented in
* Documentation/networking/devlink/netdevsim.rst
*/
enum {
NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
NSIM_TRAP_ID_FID_MISS,
};
#define NSIM_TRAP_NAME_FID_MISS "fid_miss"
#define NSIM_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
#define NSIM_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_TRAP_DROP_EXT(_id, _group_id, _metadata) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA | (_metadata))
#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_TRAP_CONTROL(_id, _group_id, _action) \
DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
NSIM_TRAP_NAME_##_id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
NSIM_TRAP_METADATA)
#define NSIM_DEV_TRAP_POLICER_MIN_RATE 1
#define NSIM_DEV_TRAP_POLICER_MAX_RATE 8000
#define NSIM_DEV_TRAP_POLICER_MIN_BURST 8
#define NSIM_DEV_TRAP_POLICER_MAX_BURST 65536
#define NSIM_TRAP_POLICER(_id, _rate, _burst) \
DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
NSIM_DEV_TRAP_POLICER_MAX_RATE, \
NSIM_DEV_TRAP_POLICER_MIN_RATE, \
NSIM_DEV_TRAP_POLICER_MAX_BURST, \
NSIM_DEV_TRAP_POLICER_MIN_BURST)
static const struct devlink_trap_policer nsim_trap_policers_arr[] = {
NSIM_TRAP_POLICER(1, 1000, 128),
NSIM_TRAP_POLICER(2, 2000, 256),
NSIM_TRAP_POLICER(3, 3000, 512),
};
static const struct devlink_trap_group nsim_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 2),
DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 3),
DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 3),
};
static const struct devlink_trap nsim_traps_arr[] = {
NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
NSIM_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
NSIM_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
NSIM_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS),
NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
NSIM_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
NSIM_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
NSIM_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
NSIM_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING, TRAP),
};
#define NSIM_TRAP_L4_DATA_LEN 100
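/* Build a dummy IPv4/UDP packet with random MAC addresses and random UDP
 * ports, used as the payload reported for each enabled trap.
 */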
static struct sk_buff *nsim_dev_trap_skb_build(void)
{
int tot_len, data_len = NSIM_TRAP_L4_DATA_LEN;
struct sk_buff *skb;
struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb)
return NULL;
tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
skb_reset_mac_header(skb);
eth = skb_put(skb, sizeof(struct ethhdr));
eth_random_addr(eth->h_dest);
eth_random_addr(eth->h_source);
eth->h_proto = htons(ETH_P_IP);
skb->protocol = htons(ETH_P_IP);
skb_set_network_header(skb, skb->len);
iph = skb_put(skb, sizeof(struct iphdr));
iph->protocol = IPPROTO_UDP;
iph->saddr = in_aton("192.0.2.1");
iph->daddr = in_aton("198.51.100.1");
iph->version = 0x4;
iph->frag_off = 0;
iph->ihl = 0x5;
iph->tot_len = htons(tot_len);
iph->ttl = 100;
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
skb_set_transport_header(skb, skb->len);
udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
get_random_bytes(&udph->source, sizeof(u16));
get_random_bytes(&udph->dest, sizeof(u16));
udph->len = htons(sizeof(struct udphdr) + data_len);
return skb;
}
static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
{
struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_trap_data *nsim_trap_data;
int i;
nsim_trap_data = nsim_dev->trap_data;
spin_lock(&nsim_trap_data->trap_lock);
for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
struct flow_action_cookie *fa_cookie = NULL;
struct nsim_trap_item *nsim_trap_item;
struct sk_buff *skb;
bool has_fa_cookie;
has_fa_cookie = nsim_traps_arr[i].metadata_cap &
DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE;
nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
continue;
skb = nsim_dev_trap_skb_build();
if (!skb)
continue;
skb->dev = nsim_dev_port->ns->netdev;
/* Trapped packets are usually passed to devlink in softIRQ,
* but in this case they are generated in a workqueue. Disable
* softIRQs to prevent lockdep from complaining about
* "incosistent lock state".
*/
spin_lock_bh(&nsim_dev->fa_cookie_lock);
fa_cookie = has_fa_cookie ? nsim_dev->fa_cookie : NULL;
devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
&nsim_dev_port->devlink_port, fa_cookie);
spin_unlock_bh(&nsim_dev->fa_cookie_lock);
consume_skb(skb);
}
spin_unlock(&nsim_trap_data->trap_lock);
}
#define NSIM_TRAP_REPORT_INTERVAL_MS 100
static void nsim_dev_trap_report_work(struct work_struct *work)
{
struct nsim_trap_data *nsim_trap_data;
struct nsim_dev_port *nsim_dev_port;
struct nsim_dev *nsim_dev;
nsim_trap_data = container_of(work, struct nsim_trap_data,
trap_report_dw.work);
nsim_dev = nsim_trap_data->nsim_dev;
/* For each running port and enabled packet trap, generate a UDP
* packet with a random 5-tuple and report it.
*/
if (!devl_trylock(priv_to_devlink(nsim_dev))) {
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 0);
return;
}
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
if (!netif_running(nsim_dev_port->ns->netdev))
continue;
nsim_dev_trap_report(nsim_dev_port);
}
devl_unlock(priv_to_devlink(nsim_dev));
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
}
static int nsim_dev_traps_init(struct devlink *devlink)
{
size_t policers_count = ARRAY_SIZE(nsim_trap_policers_arr);
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_data *nsim_trap_data;
int err;
nsim_trap_data = kzalloc(sizeof(*nsim_trap_data), GFP_KERNEL);
if (!nsim_trap_data)
return -ENOMEM;
nsim_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(nsim_traps_arr),
sizeof(struct nsim_trap_item),
GFP_KERNEL);
if (!nsim_trap_data->trap_items_arr) {
err = -ENOMEM;
goto err_trap_data_free;
}
nsim_trap_data->trap_policers_cnt_arr = kcalloc(policers_count,
sizeof(u64),
GFP_KERNEL);
if (!nsim_trap_data->trap_policers_cnt_arr) {
err = -ENOMEM;
goto err_trap_items_free;
}
/* The lock is used to protect the action state of the registered
* traps. The value is written by user and read in delayed work when
* iterating over all the traps.
*/
spin_lock_init(&nsim_trap_data->trap_lock);
nsim_trap_data->nsim_dev = nsim_dev;
nsim_dev->trap_data = nsim_trap_data;
err = devl_trap_policers_register(devlink, nsim_trap_policers_arr,
policers_count);
if (err)
goto err_trap_policers_cnt_free;
err = devl_trap_groups_register(devlink, nsim_trap_groups_arr,
ARRAY_SIZE(nsim_trap_groups_arr));
if (err)
goto err_trap_policers_unregister;
err = devl_traps_register(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr), NULL);
if (err)
goto err_trap_groups_unregister;
INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
nsim_dev_trap_report_work);
schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
return 0;
err_trap_groups_unregister:
devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
ARRAY_SIZE(nsim_trap_groups_arr));
err_trap_policers_unregister:
devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
ARRAY_SIZE(nsim_trap_policers_arr));
err_trap_policers_cnt_free:
kfree(nsim_trap_data->trap_policers_cnt_arr);
err_trap_items_free:
kfree(nsim_trap_data->trap_items_arr);
err_trap_data_free:
kfree(nsim_trap_data);
return err;
}
static void nsim_dev_traps_exit(struct devlink *devlink)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
/* caution, trap work takes devlink lock */
cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
devl_traps_unregister(devlink, nsim_traps_arr,
ARRAY_SIZE(nsim_traps_arr));
devl_trap_groups_unregister(devlink, nsim_trap_groups_arr,
ARRAY_SIZE(nsim_trap_groups_arr));
devl_trap_policers_unregister(devlink, nsim_trap_policers_arr,
ARRAY_SIZE(nsim_trap_policers_arr));
kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
kfree(nsim_dev->trap_data->trap_items_arr);
kfree(nsim_dev->trap_data);
}
static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack);
static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
enum devlink_reload_action action, enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
if (nsim_dev->dont_allow_reload) {
/* For testing purposes, user set debugfs dont_allow_reload
* value to true. So forbid it.
*/
NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
return -EOPNOTSUPP;
}
nsim_dev_reload_destroy(nsim_dev);
return 0;
}
static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_action action,
enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
if (nsim_dev->fail_reload) {
/* For testing purposes, user set debugfs fail_reload
* value to true. Fail right away.
*/
NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
return -EINVAL;
}
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return nsim_dev_reload_create(nsim_dev, extack);
}
static int nsim_dev_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
int err;
err = devlink_info_version_stored_put_ext(req, "fw.mgmt", "10.20.30",
DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err)
return err;
return devlink_info_version_running_put_ext(req, "fw.mgmt", "10.20.30",
DEVLINK_INFO_VERSION_TYPE_COMPONENT);
}
#define NSIM_DEV_FLASH_SIZE 500000
#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
static int nsim_dev_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
int i;
if ((params->overwrite_mask & ~nsim_dev->fw_update_overwrite_mask) != 0)
return -EOPNOTSUPP;
if (nsim_dev->fw_update_status) {
devlink_flash_update_status_notify(devlink,
"Preparing to flash",
params->component, 0, 0);
}
for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
if (nsim_dev->fw_update_status)
devlink_flash_update_status_notify(devlink, "Flashing",
params->component,
i * NSIM_DEV_FLASH_CHUNK_SIZE,
NSIM_DEV_FLASH_SIZE);
msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
}
if (nsim_dev->fw_update_status) {
devlink_flash_update_status_notify(devlink, "Flashing",
params->component,
NSIM_DEV_FLASH_SIZE,
NSIM_DEV_FLASH_SIZE);
devlink_flash_update_timeout_notify(devlink, "Flash select",
params->component, 81);
devlink_flash_update_status_notify(devlink, "Flashing done",
params->component, 0, 0);
}
return 0;
}
static struct nsim_trap_item *
nsim_dev_trap_item_lookup(struct nsim_dev *nsim_dev, u16 trap_id)
{
struct nsim_trap_data *nsim_trap_data = nsim_dev->trap_data;
int i;
for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
if (nsim_traps_arr[i].id == trap_id)
return &nsim_trap_data->trap_items_arr[i];
}
return NULL;
}
static int nsim_dev_devlink_trap_init(struct devlink *devlink,
const struct devlink_trap *trap,
void *trap_ctx)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_item *nsim_trap_item;
nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
if (WARN_ON(!nsim_trap_item))
return -ENOENT;
nsim_trap_item->trap_ctx = trap_ctx;
nsim_trap_item->action = trap->init_action;
return 0;
}
static int
nsim_dev_devlink_trap_action_set(struct devlink *devlink,
const struct devlink_trap *trap,
enum devlink_trap_action action,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
struct nsim_trap_item *nsim_trap_item;
nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
if (WARN_ON(!nsim_trap_item))
return -ENOENT;
spin_lock(&nsim_dev->trap_data->trap_lock);
nsim_trap_item->action = action;
spin_unlock(&nsim_dev->trap_data->trap_lock);
return 0;
}
static int
nsim_dev_devlink_trap_group_set(struct devlink *devlink,
const struct devlink_trap_group *group,
const struct devlink_trap_policer *policer,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
if (nsim_dev->fail_trap_group_set)
return -EINVAL;
return 0;
}
static int
nsim_dev_devlink_trap_policer_set(struct devlink *devlink,
const struct devlink_trap_policer *policer,
u64 rate, u64 burst,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
if (nsim_dev->fail_trap_policer_set) {
NL_SET_ERR_MSG_MOD(extack, "User setup the operation to fail for testing purposes");
return -EINVAL;
}
return 0;
}
static int
nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
const struct devlink_trap_policer *policer,
u64 *p_drops)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
u64 *cnt;
if (nsim_dev->fail_trap_policer_counter_get)
return -EINVAL;
cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
*p_drops = (*cnt)++;
return 0;
}
#define NSIM_LINK_SPEED_MAX 5000 /* Mbps */
#define NSIM_LINK_SPEED_UNIT 125000 /* 1 Mbps given in bytes/sec to avoid
* u64 overflow during conversion from
* bytes to bits.
*/
static int nsim_rate_bytes_to_units(char *name, u64 *rate, struct netlink_ext_ack *extack)
{
u64 val;
u32 rem;
val = div_u64_rem(*rate, NSIM_LINK_SPEED_UNIT, &rem);
if (rem) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
name, *rate);
NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps.");
return -EINVAL;
}
if (val > NSIM_LINK_SPEED_MAX) {
pr_err("%s rate value %lluMbps exceed link maximum speed 5000Mbps.\n",
name, val);
NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed 5000Mbps.");
return -EINVAL;
}
*rate = val;
return 0;
}
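/* Editor's sketch (not part of the driver): the same bytes/sec -> Mbps
 * conversion done by nsim_rate_bytes_to_units() above, as plain userspace C.
 * 1 Mbps equals 125000 bytes/sec, so dividing by that unit avoids the u64
 * overflow that a bytes-to-bits multiplication could cause. The names and
 * constants below are local stand-ins, not driver symbols.
 */
#include <stdio.h>
#include <stdint.h>

#define SPEED_UNIT	125000ULL	/* bytes/sec per Mbps */
#define SPEED_MAX	5000ULL		/* Mbps */

static int bytes_to_mbps(uint64_t bps, uint64_t *mbps)
{
	if (bps % SPEED_UNIT)		/* not a whole number of Mbps */
		return -1;
	*mbps = bps / SPEED_UNIT;
	if (*mbps > SPEED_MAX)		/* exceeds the 5000 Mbps link maximum */
		return -1;
	return 0;
}

int main(void)
{
	uint64_t mbps;

	if (!bytes_to_mbps(125000000ULL, &mbps))	/* 1 Gbps worth of bytes/sec */
		printf("%llu Mbps\n", (unsigned long long)mbps);	/* prints 1000 */
	return 0;
}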
static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port = priv;
struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
int err;
err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
if (err)
return err;
nsim_dev->vfconfigs[vf_id].min_tx_rate = tx_share;
return 0;
}
static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port = priv;
struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
int err;
err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
if (err)
return err;
nsim_dev->vfconfigs[vf_id].max_tx_rate = tx_max;
return 0;
}
struct nsim_rate_node {
struct dentry *ddir;
struct dentry *rate_parent;
char *parent_name;
u16 tx_share;
u16 tx_max;
};
static int nsim_node_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
struct nsim_rate_node *nsim_node = priv;
int err;
err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
if (err)
return err;
nsim_node->tx_share = tx_share;
return 0;
}
static int nsim_node_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
struct nsim_rate_node *nsim_node = priv;
int err;
err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
if (err)
return err;
nsim_node->tx_max = tx_max;
return 0;
}
static int nsim_rate_node_new(struct devlink_rate *node, void **priv,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(node->devlink);
struct nsim_rate_node *nsim_node;
if (!nsim_esw_mode_is_switchdev(nsim_dev)) {
NL_SET_ERR_MSG_MOD(extack, "Node creation allowed only in switchdev mode.");
return -EOPNOTSUPP;
}
nsim_node = kzalloc(sizeof(*nsim_node), GFP_KERNEL);
if (!nsim_node)
return -ENOMEM;
nsim_node->ddir = debugfs_create_dir(node->name, nsim_dev->nodes_ddir);
debugfs_create_u16("tx_share", 0400, nsim_node->ddir, &nsim_node->tx_share);
debugfs_create_u16("tx_max", 0400, nsim_node->ddir, &nsim_node->tx_max);
nsim_node->rate_parent = debugfs_create_file("rate_parent", 0400,
nsim_node->ddir,
&nsim_node->parent_name,
&nsim_dev_rate_parent_fops);
*priv = nsim_node;
return 0;
}
static int nsim_rate_node_del(struct devlink_rate *node, void *priv,
struct netlink_ext_ack *extack)
{
struct nsim_rate_node *nsim_node = priv;
debugfs_remove(nsim_node->rate_parent);
debugfs_remove_recursive(nsim_node->ddir);
kfree(nsim_node);
return 0;
}
static int nsim_rate_leaf_parent_set(struct devlink_rate *child,
struct devlink_rate *parent,
void *priv_child, void *priv_parent,
struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port = priv_child;
if (parent)
nsim_dev_port->parent_name = parent->name;
else
nsim_dev_port->parent_name = NULL;
return 0;
}
static int nsim_rate_node_parent_set(struct devlink_rate *child,
struct devlink_rate *parent,
void *priv_child, void *priv_parent,
struct netlink_ext_ack *extack)
{
struct nsim_rate_node *nsim_node = priv_child;
if (parent)
nsim_node->parent_name = parent->name;
else
nsim_node->parent_name = NULL;
return 0;
}
static int
nsim_dev_devlink_trap_drop_counter_get(struct devlink *devlink,
const struct devlink_trap *trap,
u64 *p_drops)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
u64 *cnt;
if (nsim_dev->fail_trap_drop_counter_get)
return -EINVAL;
cnt = &nsim_dev->trap_data->trap_pkt_cnt;
*p_drops = (*cnt)++;
return 0;
}
static const struct devlink_ops nsim_dev_devlink_ops = {
.eswitch_mode_set = nsim_devlink_eswitch_mode_set,
.eswitch_mode_get = nsim_devlink_eswitch_mode_get,
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
.info_get = nsim_dev_info_get,
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
.trap_group_set = nsim_dev_devlink_trap_group_set,
.trap_policer_set = nsim_dev_devlink_trap_policer_set,
.trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
.rate_leaf_tx_share_set = nsim_leaf_tx_share_set,
.rate_leaf_tx_max_set = nsim_leaf_tx_max_set,
.rate_node_tx_share_set = nsim_node_tx_share_set,
.rate_node_tx_max_set = nsim_node_tx_max_set,
.rate_node_new = nsim_rate_node_new,
.rate_node_del = nsim_rate_node_del,
.rate_leaf_parent_set = nsim_rate_leaf_parent_set,
.rate_node_parent_set = nsim_rate_node_parent_set,
.trap_drop_counter_get = nsim_dev_devlink_trap_drop_counter_get,
};
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct devlink_port_attrs attrs = {};
struct nsim_dev_port *nsim_dev_port;
struct devlink_port *devlink_port;
int err;
if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
if (!nsim_dev_port)
return -ENOMEM;
nsim_dev_port->port_index = nsim_dev_port_index(type, port_index);
nsim_dev_port->port_type = type;
devlink_port = &nsim_dev_port->devlink_port;
if (nsim_dev_port_is_pf(nsim_dev_port)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = port_index + 1;
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
attrs.pci_vf.pf = 0;
attrs.pci_vf.vf = port_index;
}
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
err = devl_port_register(priv_to_devlink(nsim_dev), devlink_port,
nsim_dev_port->port_index);
if (err)
goto err_port_free;
err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
if (err)
goto err_dl_port_unregister;
nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
if (IS_ERR(nsim_dev_port->ns)) {
err = PTR_ERR(nsim_dev_port->ns);
goto err_port_debugfs_exit;
}
if (nsim_dev_port_is_vf(nsim_dev_port)) {
err = devl_rate_leaf_create(&nsim_dev_port->devlink_port,
nsim_dev_port, NULL);
if (err)
goto err_nsim_destroy;
}
list_add(&nsim_dev_port->list, &nsim_dev->port_list);
return 0;
err_nsim_destroy:
nsim_destroy(nsim_dev_port->ns);
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
devl_port_unregister(devlink_port);
err_port_free:
kfree(nsim_dev_port);
return err;
}
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
{
struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
list_del(&nsim_dev_port->list);
if (nsim_dev_port_is_vf(nsim_dev_port))
devl_rate_leaf_destroy(&nsim_dev_port->devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
devl_port_unregister(devlink_port);
kfree(nsim_dev_port);
}
static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
{
struct nsim_dev_port *nsim_dev_port, *tmp;
list_for_each_entry_safe(nsim_dev_port, tmp,
&nsim_dev->port_list, list)
__nsim_dev_port_del(nsim_dev_port);
}
static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
unsigned int port_count)
{
int i, err;
for (i = 0; i < port_count; i++) {
err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
if (err)
goto err_port_del_all;
}
return 0;
err_port_del_all:
nsim_dev_port_del_all(nsim_dev);
return err;
}
static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
struct netlink_ext_ack *extack)
{
struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
struct devlink *devlink;
int err;
devlink = priv_to_devlink(nsim_dev);
nsim_dev = devlink_priv(devlink);
INIT_LIST_HEAD(&nsim_dev->port_list);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
nsim_devlink_param_load_driverinit_values(devlink);
err = nsim_dev_dummy_region_init(nsim_dev, devlink);
if (err)
return err;
err = nsim_dev_traps_init(devlink);
if (err)
goto err_dummy_region_exit;
nsim_dev->fib_data = nsim_fib_create(devlink, extack);
if (IS_ERR(nsim_dev->fib_data)) {
err = PTR_ERR(nsim_dev->fib_data);
goto err_traps_exit;
}
err = nsim_dev_health_init(nsim_dev, devlink);
if (err)
goto err_fib_destroy;
err = nsim_dev_psample_init(nsim_dev);
if (err)
goto err_health_exit;
err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
if (err)
goto err_hwstats_exit;
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
err_hwstats_exit:
nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_health_exit:
nsim_dev_health_exit(nsim_dev);
err_fib_destroy:
nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_traps_exit:
nsim_dev_traps_exit(devlink);
err_dummy_region_exit:
nsim_dev_dummy_region_exit(nsim_dev);
return err;
}
int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev;
struct devlink *devlink;
int err;
devlink = devlink_alloc_ns(&nsim_dev_devlink_ops, sizeof(*nsim_dev),
nsim_bus_dev->initial_net, &nsim_bus_dev->dev);
if (!devlink)
return -ENOMEM;
devl_lock(devlink);
nsim_dev = devlink_priv(devlink);
nsim_dev->nsim_bus_dev = nsim_bus_dev;
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
nsim_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs,
sizeof(struct nsim_vf_config),
GFP_KERNEL | __GFP_NOWARN);
if (!nsim_dev->vfconfigs) {
err = -ENOMEM;
goto err_devlink_unlock;
}
err = devl_register(devlink);
if (err)
goto err_vfc_free;
err = nsim_dev_resources_register(devlink);
if (err)
goto err_dl_unregister;
err = devl_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
if (err)
goto err_resource_unregister;
nsim_devlink_set_params_init_values(nsim_dev, devlink);
err = nsim_dev_dummy_region_init(nsim_dev, devlink);
if (err)
goto err_params_unregister;
err = nsim_dev_traps_init(devlink);
if (err)
goto err_dummy_region_exit;
err = nsim_dev_debugfs_init(nsim_dev);
if (err)
goto err_traps_exit;
nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
if (IS_ERR(nsim_dev->fib_data)) {
err = PTR_ERR(nsim_dev->fib_data);
goto err_debugfs_exit;
}
err = nsim_dev_health_init(nsim_dev, devlink);
if (err)
goto err_fib_destroy;
err = nsim_bpf_dev_init(nsim_dev);
if (err)
goto err_health_exit;
err = nsim_dev_psample_init(nsim_dev);
if (err)
goto err_bpf_dev_exit;
err = nsim_dev_hwstats_init(nsim_dev);
if (err)
goto err_psample_exit;
err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
if (err)
goto err_hwstats_exit;
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
devl_unlock(devlink);
return 0;
err_hwstats_exit:
nsim_dev_hwstats_exit(nsim_dev);
err_psample_exit:
nsim_dev_psample_exit(nsim_dev);
err_bpf_dev_exit:
nsim_bpf_dev_exit(nsim_dev);
err_health_exit:
nsim_dev_health_exit(nsim_dev);
err_fib_destroy:
nsim_fib_destroy(devlink, nsim_dev->fib_data);
err_debugfs_exit:
nsim_dev_debugfs_exit(nsim_dev);
err_traps_exit:
nsim_dev_traps_exit(devlink);
err_dummy_region_exit:
nsim_dev_dummy_region_exit(nsim_dev);
err_params_unregister:
devl_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_resource_unregister:
devl_resources_unregister(devlink);
err_dl_unregister:
devl_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
err_devlink_unlock:
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
return err;
}
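/* Editor's sketch (not part of the driver): the goto-based unwind ladder
 * used by nsim_drv_probe() above, reduced to two made-up resources so the
 * shape of the pattern is easy to see. Each failure jumps to a label that
 * releases only what was already acquired, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *a, *b;
	int err = 0;

	a = malloc(16);			/* step 1 */
	if (!a)
		return -1;
	b = malloc(16);			/* step 2 */
	if (!b) {
		err = -1;
		goto err_free_a;
	}
	printf("both resources acquired\n");
	free(b);			/* normal teardown, reverse order */
	free(a);
	return 0;

err_free_a:
	free(a);			/* undo step 1 only */
	return err;
}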
static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
{
struct devlink *devlink = priv_to_devlink(nsim_dev);
if (devlink_is_reload_failed(devlink))
return;
debugfs_remove(nsim_dev->take_snapshot);
if (nsim_dev_get_vfs(nsim_dev)) {
nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
if (nsim_esw_mode_is_switchdev(nsim_dev))
nsim_esw_legacy_enable(nsim_dev, NULL);
}
nsim_dev_port_del_all(nsim_dev);
nsim_dev_hwstats_exit(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_fib_destroy(devlink, nsim_dev->fib_data);
nsim_dev_traps_exit(devlink);
nsim_dev_dummy_region_exit(nsim_dev);
}
void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
devl_lock(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devl_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
devl_resources_unregister(devlink);
devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
devl_unlock(devlink);
devlink_free(devlink);
dev_set_drvdata(&nsim_bus_dev->dev, NULL);
}
static struct nsim_dev_port *
__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev_port *nsim_dev_port;
port_index = nsim_dev_port_index(type, port_index);
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
if (nsim_dev_port->port_index == port_index)
return nsim_dev_port;
return NULL;
}
int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
devl_lock(priv_to_devlink(nsim_dev));
if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
err = __nsim_dev_port_add(nsim_dev, type, port_index);
devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct nsim_dev_port *nsim_dev_port;
int err = 0;
devl_lock(priv_to_devlink(nsim_dev));
nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
__nsim_dev_port_del(nsim_dev_port);
devl_unlock(priv_to_devlink(nsim_dev));
return err;
}
int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
int ret = 0;
devl_lock(devlink);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_unlock;
if (nsim_bus_dev->num_vfs && num_vfs) {
ret = -EBUSY;
goto exit_unlock;
}
if (nsim_bus_dev->max_vfs < num_vfs) {
ret = -ENOMEM;
goto exit_unlock;
}
nsim_bus_dev_set_vfs(nsim_bus_dev, num_vfs);
if (nsim_esw_mode_is_switchdev(nsim_dev)) {
if (num_vfs) {
ret = nsim_esw_switchdev_enable(nsim_dev, NULL);
if (ret) {
nsim_bus_dev_set_vfs(nsim_bus_dev, 0);
goto exit_unlock;
}
} else {
nsim_esw_legacy_enable(nsim_dev, NULL);
}
}
exit_unlock:
devl_unlock(devlink);
return ret;
}
int nsim_dev_init(void)
{
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
return PTR_ERR_OR_ZERO(nsim_dev_ddir);
}
void nsim_dev_exit(void)
{
debugfs_remove_recursive(nsim_dev_ddir);
}
|
linux-master
|
drivers/net/netdevsim/dev.c
|
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
 * This software is licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree.
*
* THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
* WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
* OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
* THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
*/
#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
#include <net/udp_tunnel.h>
#include "netdevsim.h"
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
if (!nsim_ipsec_tx(ns, skb))
goto out;
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
ns->tx_bytes += skb->len;
u64_stats_update_end(&ns->syncp);
out:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void nsim_set_rx_mode(struct net_device *dev)
{
}
static int nsim_change_mtu(struct net_device *dev, int new_mtu)
{
struct netdevsim *ns = netdev_priv(dev);
if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
dev->mtu = new_mtu;
return 0;
}
static void
nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct netdevsim *ns = netdev_priv(dev);
unsigned int start;
do {
start = u64_stats_fetch_begin(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
} while (u64_stats_fetch_retry(&ns->syncp, start));
}
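/* Editor's sketch (not part of the driver): the retry-loop shape behind the
 * u64_stats_fetch_begin()/u64_stats_fetch_retry() pair used above, shown
 * with a plain sequence counter. This is a simplified illustration, not the
 * kernel API, and the memory-ordering details of a real seqlock are glossed
 * over: the writer bumps the counter to an odd value while updating and back
 * to even when done; readers retry if the counter was odd or changed.
 */
#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint seq;
static uint64_t tx_packets, tx_bytes;

static void writer_update(uint64_t len)
{
	atomic_fetch_add(&seq, 1);	/* counter now odd: update in progress */
	tx_packets++;
	tx_bytes += len;
	atomic_fetch_add(&seq, 1);	/* counter even again: update visible */
}

static void reader_snapshot(uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&seq);
		*pkts = tx_packets;
		*bytes = tx_bytes;
	} while ((start & 1) || start != atomic_load(&seq));
}

int main(void)
{
	uint64_t p, b;

	writer_update(1500);
	reader_snapshot(&p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}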
static int
nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
}
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
	/* Only refuse multicast addresses; a zero address can mean unset/any. */
if (vf >= nsim_dev_get_vfs(nsim_dev) || is_multicast_ether_addr(mac))
return -EINVAL;
memcpy(nsim_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
return 0;
}
static int nsim_set_vf_vlan(struct net_device *dev, int vf,
u16 vlan, u8 qos, __be16 vlan_proto)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev) || vlan > 4095 || qos > 7)
return -EINVAL;
nsim_dev->vfconfigs[vf].vlan = vlan;
nsim_dev->vfconfigs[vf].qos = qos;
nsim_dev->vfconfigs[vf].vlan_proto = vlan_proto;
return 0;
}
static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) {
pr_err("Not supported in switchdev mode. Please use devlink API.\n");
return -EOPNOTSUPP;
}
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev->vfconfigs[vf].min_tx_rate = min;
nsim_dev->vfconfigs[vf].max_tx_rate = max;
return 0;
}
static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev->vfconfigs[vf].spoofchk_enabled = val;
return 0;
}
static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev->vfconfigs[vf].rss_query_enabled = val;
return 0;
}
static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev->vfconfigs[vf].trusted = val;
return 0;
}
static int
nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
ivi->vf = vf;
ivi->linkstate = nsim_dev->vfconfigs[vf].link_state;
ivi->min_tx_rate = nsim_dev->vfconfigs[vf].min_tx_rate;
ivi->max_tx_rate = nsim_dev->vfconfigs[vf].max_tx_rate;
ivi->vlan = nsim_dev->vfconfigs[vf].vlan;
ivi->vlan_proto = nsim_dev->vfconfigs[vf].vlan_proto;
ivi->qos = nsim_dev->vfconfigs[vf].qos;
memcpy(&ivi->mac, nsim_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
ivi->spoofchk = nsim_dev->vfconfigs[vf].spoofchk_enabled;
ivi->trusted = nsim_dev->vfconfigs[vf].trusted;
ivi->rss_query_en = nsim_dev->vfconfigs[vf].rss_query_enabled;
return 0;
}
static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
{
struct netdevsim *ns = netdev_priv(dev);
struct nsim_dev *nsim_dev = ns->nsim_dev;
if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
switch (state) {
case IFLA_VF_LINK_STATE_AUTO:
case IFLA_VF_LINK_STATE_ENABLE:
case IFLA_VF_LINK_STATE_DISABLE:
break;
default:
return -EINVAL;
}
nsim_dev->vfconfigs[vf].link_state = state;
return 0;
}
static void nsim_taprio_stats(struct tc_taprio_qopt_stats *stats)
{
stats->window_drops = 0;
stats->tx_overruns = 0;
}
static int nsim_setup_tc_taprio(struct net_device *dev,
struct tc_taprio_qopt_offload *offload)
{
int err = 0;
switch (offload->cmd) {
case TAPRIO_CMD_REPLACE:
case TAPRIO_CMD_DESTROY:
break;
case TAPRIO_CMD_STATS:
nsim_taprio_stats(&offload->stats);
break;
default:
err = -EOPNOTSUPP;
}
return err;
}
static LIST_HEAD(nsim_block_cb_list);
static int
nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
{
struct netdevsim *ns = netdev_priv(dev);
switch (type) {
case TC_SETUP_QDISC_TAPRIO:
return nsim_setup_tc_taprio(dev, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&nsim_block_cb_list,
nsim_setup_tc_block_cb,
ns, ns, true);
default:
return -EOPNOTSUPP;
}
}
static int
nsim_set_features(struct net_device *dev, netdev_features_t features)
{
struct netdevsim *ns = netdev_priv(dev);
if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
return nsim_bpf_disable_tc(ns);
return 0;
}
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
.ndo_get_stats64 = nsim_get_stats64,
.ndo_set_vf_mac = nsim_set_vf_mac,
.ndo_set_vf_vlan = nsim_set_vf_vlan,
.ndo_set_vf_rate = nsim_set_vf_rate,
.ndo_set_vf_spoofchk = nsim_set_vf_spoofchk,
.ndo_set_vf_trust = nsim_set_vf_trust,
.ndo_get_vf_config = nsim_get_vf_config,
.ndo_set_vf_link_state = nsim_set_vf_link_state,
.ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
};
static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nsim_change_mtu,
.ndo_get_stats64 = nsim_get_stats64,
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
};
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
eth_hw_addr_random(dev);
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
IFF_NO_QUEUE;
dev->features |= NETIF_F_HIGHDMA |
NETIF_F_SG |
NETIF_F_FRAGLIST |
NETIF_F_HW_CSUM |
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC;
dev->max_mtu = ETH_MAX_MTU;
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_init_netdevsim(struct netdevsim *ns)
{
struct mock_phc *phc;
int err;
phc = mock_phc_create(&ns->nsim_bus_dev->dev);
if (IS_ERR(phc))
return PTR_ERR(phc);
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
goto err_phc_destroy;
rtnl_lock();
err = nsim_bpf_init(ns);
if (err)
goto err_utn_destroy;
nsim_macsec_init(ns);
nsim_ipsec_init(ns);
err = register_netdevice(ns->netdev);
if (err)
goto err_ipsec_teardown;
rtnl_unlock();
return 0;
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
err_phc_destroy:
mock_phc_destroy(ns->phc);
return err;
}
static int nsim_init_netdevsim_vf(struct netdevsim *ns)
{
int err;
ns->netdev->netdev_ops = &nsim_vf_netdev_ops;
rtnl_lock();
err = register_netdevice(ns->netdev);
rtnl_unlock();
return err;
}
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
struct net_device *dev;
struct netdevsim *ns;
int err;
dev = alloc_netdev_mq(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup,
nsim_dev->nsim_bus_dev->num_queues);
if (!dev)
return ERR_PTR(-ENOMEM);
dev_net_set(dev, nsim_dev_net(nsim_dev));
ns = netdev_priv(dev);
ns->netdev = dev;
u64_stats_init(&ns->syncp);
ns->nsim_dev = nsim_dev;
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
SET_NETDEV_DEVLINK_PORT(dev, &nsim_dev_port->devlink_port);
nsim_ethtool_init(ns);
if (nsim_dev_port_is_pf(nsim_dev_port))
err = nsim_init_netdevsim(ns);
else
err = nsim_init_netdevsim_vf(ns);
if (err)
goto err_free_netdev;
return ns;
err_free_netdev:
free_netdev(dev);
return ERR_PTR(err);
}
void nsim_destroy(struct netdevsim *ns)
{
struct net_device *dev = ns->netdev;
rtnl_lock();
unregister_netdevice(dev);
if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
nsim_udp_tunnels_info_destroy(dev);
mock_phc_destroy(ns->phc);
free_netdev(dev);
}
static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack,
"Please use: echo \"[ID] [PORT_COUNT] [NUM_QUEUES]\" > /sys/bus/netdevsim/new_device");
return -EOPNOTSUPP;
}
static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.kind = DRV_NAME,
.validate = nsim_validate,
};
static int __init nsim_module_init(void)
{
int err;
err = nsim_dev_init();
if (err)
return err;
err = nsim_bus_init();
if (err)
goto err_dev_exit;
err = rtnl_link_register(&nsim_link_ops);
if (err)
goto err_bus_exit;
return 0;
err_bus_exit:
nsim_bus_exit();
err_dev_exit:
nsim_dev_exit();
return err;
}
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
nsim_bus_exit();
nsim_dev_exit();
}
module_init(nsim_module_init);
module_exit(nsim_module_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
|
linux-master
|
drivers/net/netdevsim/netdev.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"
#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>
static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
struct message_handshake_initiation packet;
if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
REKEY_TIMEOUT))
return; /* This function is rate limited. */
atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
wg_timers_any_authenticated_packet_traversal(peer);
wg_timers_any_authenticated_packet_sent(peer);
atomic64_set(&peer->last_sent_handshake,
ktime_get_coarse_boottime_ns());
wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
HANDSHAKE_DSCP);
wg_timers_handshake_initiated(peer);
}
}
void wg_packet_handshake_send_worker(struct work_struct *work)
{
struct wg_peer *peer = container_of(work, struct wg_peer,
transmit_handshake_work);
wg_packet_send_handshake_initiation(peer);
wg_peer_put(peer);
}
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
bool is_retry)
{
if (!is_retry)
peer->timer_handshake_attempts = 0;
rcu_read_lock_bh();
/* We check last_sent_handshake here in addition to the actual function
* we're queueing up, so that we don't queue things if not strictly
* necessary:
*/
if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
REKEY_TIMEOUT) ||
unlikely(READ_ONCE(peer->is_dead)))
goto out;
wg_peer_get(peer);
/* Queues up calling packet_send_queued_handshakes(peer), where we do a
* peer_put(peer) after:
*/
if (!queue_work(peer->device->handshake_send_wq,
&peer->transmit_handshake_work))
/* If the work was already queued, we want to drop the
* extra reference:
*/
wg_peer_put(peer);
out:
rcu_read_unlock_bh();
}
void wg_packet_send_handshake_response(struct wg_peer *peer)
{
struct message_handshake_response packet;
atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
if (wg_noise_handshake_begin_session(&peer->handshake,
&peer->keypairs)) {
wg_timers_session_derived(peer);
wg_timers_any_authenticated_packet_traversal(peer);
wg_timers_any_authenticated_packet_sent(peer);
atomic64_set(&peer->last_sent_handshake,
ktime_get_coarse_boottime_ns());
wg_socket_send_buffer_to_peer(peer, &packet,
sizeof(packet),
HANDSHAKE_DSCP);
}
}
}
void wg_packet_send_handshake_cookie(struct wg_device *wg,
struct sk_buff *initiating_skb,
__le32 sender_index)
{
struct message_handshake_cookie packet;
net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
wg->dev->name, initiating_skb);
wg_cookie_message_create(&packet, initiating_skb, sender_index,
&wg->cookie_checker);
wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
sizeof(packet));
}
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
bool send;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
send = keypair && READ_ONCE(keypair->sending.is_valid) &&
(atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
(keypair->i_am_the_initiator &&
wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
rcu_read_unlock_bh();
if (unlikely(send))
wg_packet_send_queued_handshake_initiation(peer, false);
}
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
unsigned int padded_size, last_unit = skb->len;
if (unlikely(!PACKET_CB(skb)->mtu))
return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
/* We do this modulo business with the MTU, just in case the networking
* layer gives us a packet that's bigger than the MTU. In that case, we
* wouldn't want the final subtraction to overflow in the case of the
* padded_size being clamped. Fortunately, that's very rarely the case,
* so we optimize for that not happening.
*/
if (unlikely(last_unit > PACKET_CB(skb)->mtu))
last_unit %= PACKET_CB(skb)->mtu;
padded_size = min(PACKET_CB(skb)->mtu,
ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
return padded_size - last_unit;
}
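/* Editor's sketch (not part of the driver): the padding arithmetic used by
 * calculate_skb_padding() above, as standalone C. The plaintext is padded up
 * to a multiple of 16 (MESSAGE_PADDING_MULTIPLE in WireGuard) but never past
 * the MTU; the constant name and ALIGN_UP helper below are local stand-ins.
 */
#include <stdio.h>

#define PADDING_MULTIPLE 16U

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of two */

static unsigned int padding_len(unsigned int len, unsigned int mtu)
{
	unsigned int padded;

	if (!mtu)				/* keepalive-style corner case */
		return ALIGN_UP(len, PADDING_MULTIPLE) - len;
	if (len > mtu)				/* defensive, as in the driver */
		len %= mtu;
	padded = ALIGN_UP(len, PADDING_MULTIPLE);
	if (padded > mtu)
		padded = mtu;			/* clamp so padding never exceeds MTU */
	return padded - len;
}

int main(void)
{
	printf("%u\n", padding_len(100, 1420));		/* 100 -> 112, prints 12 */
	printf("%u\n", padding_len(1415, 1420));	/* clamped to the MTU, prints 5 */
	return 0;
}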
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
unsigned int padding_len, plaintext_len, trailer_len;
struct scatterlist sg[MAX_SKB_FRAGS + 8];
struct message_data *header;
struct sk_buff *trailer;
int num_frags;
/* Force hash calculation before encryption so that flow analysis is
* consistent over the inner packet.
*/
skb_get_hash(skb);
/* Calculate lengths. */
padding_len = calculate_skb_padding(skb);
trailer_len = padding_len + noise_encrypted_len(0);
plaintext_len = skb->len + padding_len;
/* Expand data section to have room for padding and auth tag. */
num_frags = skb_cow_data(skb, trailer_len, &trailer);
if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
return false;
/* Set the padding to zeros, and make sure it and the auth tag are part
* of the skb.
*/
memset(skb_tail_pointer(trailer), 0, padding_len);
/* Expand head section to have room for our header and the network
* stack's headers.
*/
if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
return false;
/* Finalize checksum calculation for the inner packet, if required. */
if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
skb_checksum_help(skb)))
return false;
/* Only after checksumming can we safely add on the padding at the end
* and the header.
*/
skb_set_inner_network_header(skb, 0);
header = (struct message_data *)skb_push(skb, sizeof(*header));
header->header.type = cpu_to_le32(MESSAGE_DATA);
header->key_idx = keypair->remote_index;
header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
pskb_put(skb, trailer, trailer_len);
/* Now we can encrypt the scattergather segments */
sg_init_table(sg, num_frags);
if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
noise_encrypted_len(plaintext_len)) <= 0)
return false;
return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
PACKET_CB(skb)->nonce,
keypair->sending.key);
}
void wg_packet_send_keepalive(struct wg_peer *peer)
{
struct sk_buff *skb;
if (skb_queue_empty(&peer->staged_packet_queue)) {
skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
GFP_ATOMIC);
if (unlikely(!skb))
return;
skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
skb->dev = peer->device->dev;
PACKET_CB(skb)->mtu = skb->dev->mtu;
skb_queue_tail(&peer->staged_packet_queue, skb);
net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
}
wg_packet_send_staged_packets(peer);
}
static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
{
struct sk_buff *skb, *next;
bool is_keepalive, data_sent = false;
wg_timers_any_authenticated_packet_traversal(peer);
wg_timers_any_authenticated_packet_sent(peer);
skb_list_walk_safe(first, skb, next) {
is_keepalive = skb->len == message_data_len(0);
if (likely(!wg_socket_send_skb_to_peer(peer, skb,
PACKET_CB(skb)->ds) && !is_keepalive))
data_sent = true;
}
if (likely(data_sent))
wg_timers_data_sent(peer);
keep_key_fresh(peer);
}
void wg_packet_tx_worker(struct work_struct *work)
{
struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
struct noise_keypair *keypair;
enum packet_state state;
struct sk_buff *first;
while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
PACKET_STATE_UNCRYPTED) {
wg_prev_queue_drop_peeked(&peer->tx_queue);
keypair = PACKET_CB(first)->keypair;
if (likely(state == PACKET_STATE_CRYPTED))
wg_packet_create_data_done(peer, first);
else
kfree_skb_list(first);
wg_noise_keypair_put(keypair, false);
wg_peer_put(peer);
if (need_resched())
cond_resched();
}
}
void wg_packet_encrypt_worker(struct work_struct *work)
{
struct crypt_queue *queue = container_of(work, struct multicore_worker,
work)->ptr;
struct sk_buff *first, *skb, *next;
while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
enum packet_state state = PACKET_STATE_CRYPTED;
skb_list_walk_safe(first, skb, next) {
if (likely(encrypt_packet(skb,
PACKET_CB(first)->keypair))) {
wg_reset_packet(skb, true);
} else {
state = PACKET_STATE_DEAD;
break;
}
}
wg_queue_enqueue_per_peer_tx(first, state);
if (need_resched())
cond_resched();
}
}
static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
{
struct wg_device *wg = peer->device;
int ret = -EINVAL;
rcu_read_lock_bh();
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
rcu_read_unlock_bh();
if (likely(!ret || ret == -EPIPE))
return;
wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
wg_peer_put(peer);
kfree_skb_list(first);
}
void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
spin_lock_bh(&peer->staged_packet_queue.lock);
peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
__skb_queue_purge(&peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
}
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
struct noise_keypair *keypair;
struct sk_buff_head packets;
struct sk_buff *skb;
/* Steal the current queue into our local one. */
__skb_queue_head_init(&packets);
spin_lock_bh(&peer->staged_packet_queue.lock);
skb_queue_splice_init(&peer->staged_packet_queue, &packets);
spin_unlock_bh(&peer->staged_packet_queue.lock);
if (unlikely(skb_queue_empty(&packets)))
return;
/* First we make sure we have a valid reference to a valid key. */
rcu_read_lock_bh();
keypair = wg_noise_keypair_get(
rcu_dereference_bh(peer->keypairs.current_keypair));
rcu_read_unlock_bh();
if (unlikely(!keypair))
goto out_nokey;
if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
goto out_nokey;
if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME)))
goto out_invalid;
/* After we know we have a somewhat valid key, we now try to assign
* nonces to all of the packets in the queue. If we can't assign nonces
* for all of them, we just consider it a failure and wait for the next
* handshake.
*/
skb_queue_walk(&packets, skb) {
/* 0 for no outer TOS: no leak. TODO: at some later point, we
* might consider using flowi->tos as outer instead.
*/
PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
PACKET_CB(skb)->nonce =
atomic64_inc_return(&keypair->sending_counter) - 1;
if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
goto out_invalid;
}
packets.prev->next = NULL;
wg_peer_get(keypair->entry.peer);
PACKET_CB(packets.next)->keypair = keypair;
wg_packet_create_data(peer, packets.next);
return;
out_invalid:
WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
wg_noise_keypair_put(keypair, false);
/* We orphan the packets if we're waiting on a handshake, so that they
* don't block a socket's pool.
*/
skb_queue_walk(&packets, skb)
skb_orphan(skb);
/* Then we put them back on the top of the queue. We're not too
* concerned about accidentally getting things a little out of order if
* packets are being added really fast, because this queue is for before
* packets can even be sent and it's small anyway.
*/
spin_lock_bh(&peer->staged_packet_queue.lock);
skb_queue_splice(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
/* If we're exiting because there's something wrong with the key, it
* means we should initiate a new handshake.
*/
wg_packet_send_queued_handshake_initiation(peer, false);
}
|
linux-master
|
drivers/net/wireguard/send.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "allowedips.h"
#include "peer.h"
enum { MAX_ALLOWEDIPS_DEPTH = 129 };
static struct kmem_cache *node_cache;
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
{
if (bits == 32) {
*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
} else if (bits == 128) {
((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
}
}
static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
u8 cidr, u8 bits)
{
node->cidr = cidr;
node->bit_at_a = cidr / 8U;
#ifdef __LITTLE_ENDIAN
node->bit_at_a ^= (bits / 8U - 1U) % 8U;
#endif
node->bit_at_b = 7U - (cidr % 8U);
node->bitlen = bits;
memcpy(node->bits, src, bits / 8U);
}
static inline u8 choose(struct allowedips_node *node, const u8 *key)
{
return (key[node->bit_at_a] >> node->bit_at_b) & 1;
}
static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
return;
stack[(*len)++] = rcu_dereference_raw(p);
}
}
static void node_free_rcu(struct rcu_head *rcu)
{
kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
}
static void root_free_rcu(struct rcu_head *rcu)
{
struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
push_rcu(stack, node->bit[0], &len);
push_rcu(stack, node->bit[1], &len);
kmem_cache_free(node_cache, node);
}
}
static void root_remove_peer_lists(struct allowedips_node *root)
{
struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
push_rcu(stack, node->bit[0], &len);
push_rcu(stack, node->bit[1], &len);
if (rcu_access_pointer(node->peer))
list_del(&node->peer_list);
}
}
static unsigned int fls128(u64 a, u64 b)
{
return a ? fls64(a) + 64U : fls64(b);
}
static u8 common_bits(const struct allowedips_node *node, const u8 *key,
u8 bits)
{
if (bits == 32)
return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key);
else if (bits == 128)
return 128U - fls128(
*(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0],
*(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]);
return 0;
}
static bool prefix_matches(const struct allowedips_node *node, const u8 *key,
u8 bits)
{
/* This could be much faster if it actually just compared the common
* bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and
* the rest, but it turns out that common_bits is already super fast on
* modern processors, even taking into account the unfortunate bswap.
* So, we just inline it like this instead.
*/
return common_bits(node, key, bits) >= node->cidr;
}
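/* Editor's sketch (not part of the driver): how the 32-bit branch of
 * common_bits() counts the length of the shared prefix. After swap_endian()
 * the keys are in host order, so the index of the first differing bit is the
 * count of leading zeros of the XOR. The GCC/Clang builtin __builtin_clz()
 * stands in for the kernel's fls(); clz is undefined for 0, so the equal
 * case is handled separately.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int common_prefix32(uint32_t a, uint32_t b)
{
	return a == b ? 32 : (unsigned int)__builtin_clz(a ^ b);
}

int main(void)
{
	/* 10.0.0.0 vs 10.0.1.0 share the first 23 bits */
	printf("%u\n", common_prefix32(0x0a000000, 0x0a000100));
	return 0;
}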
static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
const u8 *key)
{
struct allowedips_node *node = trie, *found = NULL;
while (node && prefix_matches(node, key, bits)) {
if (rcu_access_pointer(node->peer))
found = node;
if (node->cidr == bits)
break;
node = rcu_dereference_bh(node->bit[choose(node, key)]);
}
return found;
}
/* Returns a strong reference to a peer */
static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits,
const void *be_ip)
{
/* Aligned so it can be passed to fls/fls64 */
u8 ip[16] __aligned(__alignof(u64));
struct allowedips_node *node;
struct wg_peer *peer = NULL;
swap_endian(ip, be_ip, bits);
rcu_read_lock_bh();
retry:
node = find_node(rcu_dereference_bh(root), bits, ip);
if (node) {
peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer));
if (!peer)
goto retry;
}
rcu_read_unlock_bh();
return peer;
}
static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
u8 cidr, u8 bits, struct allowedips_node **rnode,
struct mutex *lock)
{
struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
struct allowedips_node *parent = NULL;
bool exact = false;
while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) {
parent = node;
if (parent->cidr == cidr) {
exact = true;
break;
}
node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
}
*rnode = parent;
return exact;
}
static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
{
node->parent_bit_packed = (unsigned long)parent | bit;
rcu_assign_pointer(*parent, node);
}
static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
{
u8 bit = choose(parent, node->bits);
connect_node(&parent->bit[bit], bit, node);
}
static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
struct allowedips_node *node, *parent, *down, *newnode;
if (unlikely(cidr > bits || !peer))
return -EINVAL;
if (!rcu_access_pointer(*trie)) {
node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
RCU_INIT_POINTER(node->peer, peer);
list_add_tail(&node->peer_list, &peer->allowedips_list);
copy_and_assign_cidr(node, key, cidr, bits);
connect_node(trie, 2, node);
return 0;
}
if (node_placement(*trie, key, cidr, bits, &node, lock)) {
rcu_assign_pointer(node->peer, peer);
list_move_tail(&node->peer_list, &peer->allowedips_list);
return 0;
}
newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!newnode))
return -ENOMEM;
RCU_INIT_POINTER(newnode->peer, peer);
list_add_tail(&newnode->peer_list, &peer->allowedips_list);
copy_and_assign_cidr(newnode, key, cidr, bits);
if (!node) {
down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
} else {
const u8 bit = choose(node, key);
down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
if (!down) {
connect_node(&node->bit[bit], bit, newnode);
return 0;
}
}
cidr = min(cidr, common_bits(down, key, bits));
parent = node;
if (newnode->cidr == cidr) {
choose_and_connect_node(newnode, down);
if (!parent)
connect_node(trie, 2, newnode);
else
choose_and_connect_node(parent, newnode);
return 0;
}
node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!node)) {
list_del(&newnode->peer_list);
kmem_cache_free(node_cache, newnode);
return -ENOMEM;
}
INIT_LIST_HEAD(&node->peer_list);
copy_and_assign_cidr(node, newnode->bits, cidr, bits);
choose_and_connect_node(node, down);
choose_and_connect_node(node, newnode);
if (!parent)
connect_node(trie, 2, node);
else
choose_and_connect_node(parent, node);
return 0;
}
void wg_allowedips_init(struct allowedips *table)
{
table->root4 = table->root6 = NULL;
table->seq = 1;
}
void wg_allowedips_free(struct allowedips *table, struct mutex *lock)
{
struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6;
++table->seq;
RCU_INIT_POINTER(table->root4, NULL);
RCU_INIT_POINTER(table->root6, NULL);
if (rcu_access_pointer(old4)) {
struct allowedips_node *node = rcu_dereference_protected(old4,
lockdep_is_held(lock));
root_remove_peer_lists(node);
call_rcu(&node->rcu, root_free_rcu);
}
if (rcu_access_pointer(old6)) {
struct allowedips_node *node = rcu_dereference_protected(old6,
lockdep_is_held(lock));
root_remove_peer_lists(node);
call_rcu(&node->rcu, root_free_rcu);
}
}
int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
/* Aligned so it can be passed to fls */
u8 key[4] __aligned(__alignof(u32));
++table->seq;
swap_endian(key, (const u8 *)ip, 32);
return add(&table->root4, 32, key, cidr, peer, lock);
}
int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
/* Aligned so it can be passed to fls64 */
u8 key[16] __aligned(__alignof(u64));
++table->seq;
swap_endian(key, (const u8 *)ip, 128);
return add(&table->root6, 128, key, cidr, peer, lock);
}
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
bool free_parent;
if (list_empty(&peer->allowedips_list))
return;
++table->seq;
list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
list_del_init(&node->peer_list);
RCU_INIT_POINTER(node->peer, NULL);
if (node->bit[0] && node->bit[1])
continue;
child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
lockdep_is_held(lock));
if (child)
child->parent_bit_packed = node->parent_bit_packed;
parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
*parent_bit = child;
parent = (void *)parent_bit -
offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
free_parent = !rcu_access_pointer(node->bit[0]) &&
!rcu_access_pointer(node->bit[1]) &&
(node->parent_bit_packed & 3) <= 1 &&
!rcu_access_pointer(parent->peer);
if (free_parent)
child = rcu_dereference_protected(
parent->bit[!(node->parent_bit_packed & 1)],
lockdep_is_held(lock));
call_rcu(&node->rcu, node_free_rcu);
if (!free_parent)
continue;
if (child)
child->parent_bit_packed = parent->parent_bit_packed;
*(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
call_rcu(&parent->rcu, node_free_rcu);
}
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
{
const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U);
swap_endian(ip, node->bits, node->bitlen);
memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes);
if (node->cidr)
ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U);
*cidr = node->cidr;
return node->bitlen == 32 ? AF_INET : AF_INET6;
}
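/* Editor's sketch (not part of the driver): what the final masking step in
 * wg_allowedips_read_node() does. With unsigned arithmetic, -cidr % 8U equals
 * (8 - cidr % 8) % 8, i.e. the number of host bits left over in the last byte
 * touched by the prefix, and exactly those low bits are cleared.
 */
#include <stdio.h>

int main(void)
{
	unsigned char last_byte = 0xff;		/* pretend byte 2 of a /20 address */
	unsigned int cidr = 20;

	last_byte &= ~0U << (-cidr % 8U);	/* shift count is 4 here */
	printf("0x%02x\n", last_byte);		/* prints 0xf0 */
	return 0;
}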
/* Returns a strong reference to a peer */
struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table,
struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP))
return lookup(table->root4, 32, &ip_hdr(skb)->daddr);
else if (skb->protocol == htons(ETH_P_IPV6))
return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr);
return NULL;
}
/* Returns a strong reference to a peer */
struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
struct sk_buff *skb)
{
if (skb->protocol == htons(ETH_P_IP))
return lookup(table->root4, 32, &ip_hdr(skb)->saddr);
else if (skb->protocol == htons(ETH_P_IPV6))
return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr);
return NULL;
}
int __init wg_allowedips_slab_init(void)
{
node_cache = KMEM_CACHE(allowedips_node, 0);
return node_cache ? 0 : -ENOMEM;
}
void wg_allowedips_slab_uninit(void)
{
rcu_barrier();
kmem_cache_destroy(node_cache);
}
#include "selftest/allowedips.c"
|
linux-master
|
drivers/net/wireguard/allowedips.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "peerlookup.h"
#include "peer.h"
#include "noise.h"
static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
/* siphash gives us a secure 64bit number based on a random key. Since
* the bits are uniformly distributed, we can then mask off to get the
* bits we need.
*/
const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
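/* Editor's sketch (not part of the driver): selecting a bucket by masking a
 * hash with (table_size - 1), which only works because the table size is a
 * power of two, as in pubkey_bucket() above and index_bucket() below. The
 * table size and the fixed hash value are illustrative stand-ins; siphash()
 * is not reimplemented here.
 */
#include <stdio.h>
#include <stdint.h>

#define TABLE_BITS	11
#define TABLE_SIZE	(1U << TABLE_BITS)	/* 2048 buckets, power of two */

int main(void)
{
	uint64_t hash = 0x9e3779b97f4a7c15ULL;	/* stand-in for a siphash() result */
	unsigned int bucket = hash & (TABLE_SIZE - 1);

	printf("bucket %u of %u\n", bucket, TABLE_SIZE);
	return 0;
}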
struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
get_random_bytes(&table->key, sizeof(table->key));
hash_init(table->hashtable);
mutex_init(&table->lock);
return table;
}
void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
struct wg_peer *peer)
{
mutex_lock(&table->lock);
hlist_add_head_rcu(&peer->pubkey_hash,
pubkey_bucket(table, peer->handshake.remote_static));
mutex_unlock(&table->lock);
}
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
struct wg_peer *peer)
{
mutex_lock(&table->lock);
hlist_del_init_rcu(&peer->pubkey_hash);
mutex_unlock(&table->lock);
}
/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
struct wg_peer *iter_peer, *peer = NULL;
rcu_read_lock_bh();
hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
pubkey_hash) {
if (!memcmp(pubkey, iter_peer->handshake.remote_static,
NOISE_PUBLIC_KEY_LEN)) {
peer = iter_peer;
break;
}
}
peer = wg_peer_get_maybe_zero(peer);
rcu_read_unlock_bh();
return peer;
}
static struct hlist_head *index_bucket(struct index_hashtable *table,
const __le32 index)
{
/* Since the indices are random and thus all bits are uniformly
* distributed, we can find its bucket simply by masking.
*/
return &table->hashtable[(__force u32)index &
(HASH_SIZE(table->hashtable) - 1)];
}
struct index_hashtable *wg_index_hashtable_alloc(void)
{
struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return NULL;
hash_init(table->hashtable);
spin_lock_init(&table->lock);
return table;
}
/* At the moment, we limit ourselves to 2^20 total peers, which generally might
* amount to 2^20*3 items in this hashtable. The algorithm below works by
* picking a random number and testing it. We can see that these limits mean we
* usually succeed pretty quickly:
*
* >>> def calculation(tries, size):
* ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
* ...
* >>> calculation(1, 2**20 * 3)
* 0.999267578125
* >>> calculation(2, 2**20 * 3)
* 0.0007318854331970215
* >>> calculation(3, 2**20 * 3)
* 5.360489012673497e-07
* >>> calculation(4, 2**20 * 3)
* 3.9261394135792216e-10
*
* At the moment, we don't do any masking, so this algorithm isn't exactly
* constant time in either the random guessing or in the hash list lookup. We
* could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
* is another thing to consider moving forward.
*/
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
struct index_hashtable_entry *entry)
{
struct index_hashtable_entry *existing_entry;
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
spin_unlock_bh(&table->lock);
rcu_read_lock_bh();
search_unused_slot:
/* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
hlist_for_each_entry_rcu_bh(existing_entry,
index_bucket(table, entry->index),
index_hash) {
if (existing_entry->index == entry->index)
/* If it's already in use, we continue searching. */
goto search_unused_slot;
}
/* Once we've found an unused slot, we lock it, and then double-check
* that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
hlist_for_each_entry_rcu_bh(existing_entry,
index_bucket(table, entry->index),
index_hash) {
if (existing_entry->index == entry->index) {
spin_unlock_bh(&table->lock);
/* If it was stolen, we start over. */
goto search_unused_slot;
}
}
/* Otherwise, we know we have it exclusively (since we're locked),
* so we insert.
*/
hlist_add_head_rcu(&entry->index_hash,
index_bucket(table, entry->index));
spin_unlock_bh(&table->lock);
rcu_read_unlock_bh();
return entry->index;
}
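/* Editor's sketch (not part of the driver): the probability estimate from the
 * comment above wg_index_hashtable_insert(), ported from its Python snippet
 * to C. With at most 2^20 * 3 occupied indices out of 2^32, the chance that
 * the k-th random guess is the first free one falls off very quickly.
 * Build with -lm for pow().
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
	const double size = (double)(1 << 20) * 3;	/* occupied slots */
	const double p_hit = size / 4294967296.0;	/* 2^32 possible indices */
	int tries;

	for (tries = 1; tries <= 4; tries++)
		printf("tries=%d: %.10g\n", tries,
		       pow(p_hit, tries - 1) * (1.0 - p_hit));
	return 0;
}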
bool wg_index_hashtable_replace(struct index_hashtable *table,
struct index_hashtable_entry *old,
struct index_hashtable_entry *new)
{
bool ret;
spin_lock_bh(&table->lock);
ret = !hlist_unhashed(&old->index_hash);
if (unlikely(!ret))
goto out;
new->index = old->index;
hlist_replace_rcu(&old->index_hash, &new->index_hash);
/* Calling init here NULLs out index_hash, and in fact after this
* function returns, it's theoretically possible for this to get
* reinserted elsewhere. That means the RCU lookup below might either
* terminate early or jump between buckets, in which case the packet
* simply gets dropped, which isn't terrible.
*/
INIT_HLIST_NODE(&old->index_hash);
out:
spin_unlock_bh(&table->lock);
return ret;
}
void wg_index_hashtable_remove(struct index_hashtable *table,
struct index_hashtable_entry *entry)
{
spin_lock_bh(&table->lock);
hlist_del_init_rcu(&entry->index_hash);
spin_unlock_bh(&table->lock);
}
/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
const enum index_hashtable_type type_mask,
const __le32 index, struct wg_peer **peer)
{
struct index_hashtable_entry *iter_entry, *entry = NULL;
rcu_read_lock_bh();
hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
index_hash) {
if (iter_entry->index == index) {
if (likely(iter_entry->type & type_mask))
entry = iter_entry;
break;
}
}
if (likely(entry)) {
entry->peer = wg_peer_get_maybe_zero(entry->peer);
if (likely(entry->peer))
*peer = entry->peer;
else
entry = NULL;
}
rcu_read_unlock_bh();
return entry;
}
| linux-master | drivers/net/wireguard/peerlookup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "queueing.h"
#include <linux/skb_array.h>
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
int cpu;
struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);
if (!worker)
return NULL;
for_each_possible_cpu(cpu) {
per_cpu_ptr(worker, cpu)->ptr = ptr;
INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
}
return worker;
}
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
unsigned int len)
{
int ret;
memset(queue, 0, sizeof(*queue));
queue->last_cpu = -1;
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
if (ret)
return ret;
queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
if (!queue->worker) {
ptr_ring_cleanup(&queue->ring, NULL);
return -ENOMEM;
}
return 0;
}
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
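/* What follows is an intrusive MPSC (multi-producer, single-consumer) queue,
 * along the lines of the well-known node-based MPSC design: producers publish
 * an skb with an xchg on ->head and then link the previous head to it, while
 * the single consumer walks NEXT pointers from ->tail. The embedded 'empty'
 * stub element keeps head and tail valid when no real packets are queued, and
 * skb->prev is repurposed as the NEXT link.
 */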
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)
void wg_prev_queue_init(struct prev_queue *queue)
{
NEXT(STUB(queue)) = NULL;
queue->head = queue->tail = STUB(queue);
queue->peeked = NULL;
atomic_set(&queue->count, 0);
BUILD_BUG_ON(
offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
offsetof(struct prev_queue, empty) ||
offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
offsetof(struct prev_queue, empty));
}
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
WRITE_ONCE(NEXT(skb), NULL);
WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
return false;
__wg_prev_queue_enqueue(queue, skb);
return true;
}
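/* Dequeues one skb, or returns NULL when nothing can safely be removed right
 * now. NULL is also returned while a concurrent producer has swapped ->head
 * but not yet published its NEXT link, so an empty result may be transient.
 * To hand back the final real element, the stub is re-enqueued first so that
 * ->tail never overtakes ->head.
 */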
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));
if (tail == STUB(queue)) {
if (!next)
return NULL;
queue->tail = next;
tail = next;
next = smp_load_acquire(&NEXT(next));
}
if (next) {
queue->tail = next;
atomic_dec(&queue->count);
return tail;
}
if (tail != READ_ONCE(queue->head))
return NULL;
__wg_prev_queue_enqueue(queue, STUB(queue));
next = smp_load_acquire(&NEXT(tail));
if (next) {
queue->tail = next;
atomic_dec(&queue->count);
return tail;
}
return NULL;
}
#undef NEXT
#undef STUB
| linux-master | drivers/net/wireguard/queueing.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "queueing.h"
#include "messages.h"
#include <linux/ctype.h>
#include <linux/net.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/inetdevice.h>
#include <net/udp_tunnel.h>
#include <net/ipv6.h>
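/* send4() resolves an IPv4 route to the peer's endpoint, preferring the
 * per-peer dst_cache when one is supplied. If the remembered source address
 * or source interface stored in the endpoint can no longer be used, those
 * hints are cleared and the route lookup is retried without them before
 * giving up.
 */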
static int send4(struct wg_device *wg, struct sk_buff *skb,
struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
struct flowi4 fl = {
.saddr = endpoint->src4.s_addr,
.daddr = endpoint->addr4.sin_addr.s_addr,
.fl4_dport = endpoint->addr4.sin_port,
.flowi4_mark = wg->fwmark,
.flowi4_proto = IPPROTO_UDP
};
struct rtable *rt = NULL;
struct sock *sock;
int ret = 0;
skb_mark_not_on_list(skb);
skb->dev = wg->dev;
skb->mark = wg->fwmark;
rcu_read_lock_bh();
sock = rcu_dereference_bh(wg->sock4);
if (unlikely(!sock)) {
ret = -ENONET;
goto err;
}
fl.fl4_sport = inet_sk(sock)->inet_sport;
if (cache)
rt = dst_cache_get_ip4(cache, &fl.saddr);
if (!rt) {
security_sk_classify_flow(sock, flowi4_to_flowi_common(&fl));
if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0,
fl.saddr, RT_SCOPE_HOST))) {
endpoint->src4.s_addr = 0;
endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
}
rt = ip_route_output_flow(sock_net(sock), &fl, sock);
if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) &&
PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) &&
rt->dst.dev->ifindex != endpoint->src_if4)))) {
endpoint->src4.s_addr = 0;
endpoint->src_if4 = 0;
fl.saddr = 0;
if (cache)
dst_cache_reset(cache);
if (!IS_ERR(rt))
ip_rt_put(rt);
rt = ip_route_output_flow(sock_net(sock), &fl, sock);
}
if (IS_ERR(rt)) {
ret = PTR_ERR(rt);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
}
if (cache)
dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
}
skb->ignore_df = 1;
udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds,
ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
fl.fl4_dport, false, false);
goto out;
err:
kfree_skb(skb);
out:
rcu_read_unlock_bh();
return ret;
}
static int send6(struct wg_device *wg, struct sk_buff *skb,
struct endpoint *endpoint, u8 ds, struct dst_cache *cache)
{
#if IS_ENABLED(CONFIG_IPV6)
struct flowi6 fl = {
.saddr = endpoint->src6,
.daddr = endpoint->addr6.sin6_addr,
.fl6_dport = endpoint->addr6.sin6_port,
.flowi6_mark = wg->fwmark,
.flowi6_oif = endpoint->addr6.sin6_scope_id,
.flowi6_proto = IPPROTO_UDP
/* TODO: addr->sin6_flowinfo */
};
struct dst_entry *dst = NULL;
struct sock *sock;
int ret = 0;
skb_mark_not_on_list(skb);
skb->dev = wg->dev;
skb->mark = wg->fwmark;
rcu_read_lock_bh();
sock = rcu_dereference_bh(wg->sock6);
if (unlikely(!sock)) {
ret = -ENONET;
goto err;
}
fl.fl6_sport = inet_sk(sock)->inet_sport;
if (cache)
dst = dst_cache_get_ip6(cache, &fl.saddr);
if (!dst) {
security_sk_classify_flow(sock, flowi6_to_flowi_common(&fl));
if (unlikely(!ipv6_addr_any(&fl.saddr) &&
!ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) {
endpoint->src6 = fl.saddr = in6addr_any;
if (cache)
dst_cache_reset(cache);
}
dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl,
NULL);
if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
}
if (cache)
dst_cache_set_ip6(cache, dst, &fl.saddr);
}
skb->ignore_df = 1;
udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds,
ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
fl.fl6_dport, false);
goto out;
err:
kfree_skb(skb);
out:
rcu_read_unlock_bh();
return ret;
#else
kfree_skb(skb);
return -EAFNOSUPPORT;
#endif
}
int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds)
{
size_t skb_len = skb->len;
int ret = -EAFNOSUPPORT;
read_lock_bh(&peer->endpoint_lock);
if (peer->endpoint.addr.sa_family == AF_INET)
ret = send4(peer->device, skb, &peer->endpoint, ds,
&peer->endpoint_cache);
else if (peer->endpoint.addr.sa_family == AF_INET6)
ret = send6(peer->device, skb, &peer->endpoint, ds,
&peer->endpoint_cache);
else
dev_kfree_skb(skb);
if (likely(!ret))
peer->tx_bytes += skb_len;
read_unlock_bh(&peer->endpoint_lock);
return ret;
}
int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer,
size_t len, u8 ds)
{
struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
if (unlikely(!skb))
return -ENOMEM;
skb_reserve(skb, SKB_HEADER_LEN);
skb_set_inner_network_header(skb, 0);
skb_put_data(skb, buffer, len);
return wg_socket_send_skb_to_peer(peer, skb, ds);
}
int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg,
struct sk_buff *in_skb, void *buffer,
size_t len)
{
int ret = 0;
struct sk_buff *skb;
struct endpoint endpoint;
if (unlikely(!in_skb))
return -EINVAL;
ret = wg_socket_endpoint_from_skb(&endpoint, in_skb);
if (unlikely(ret < 0))
return ret;
skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC);
if (unlikely(!skb))
return -ENOMEM;
skb_reserve(skb, SKB_HEADER_LEN);
skb_set_inner_network_header(skb, 0);
skb_put_data(skb, buffer, len);
if (endpoint.addr.sa_family == AF_INET)
ret = send4(wg, skb, &endpoint, 0, NULL);
else if (endpoint.addr.sa_family == AF_INET6)
ret = send6(wg, skb, &endpoint, 0, NULL);
/* No other possibilities if the endpoint is valid, which it is,
* as we checked above.
*/
return ret;
}
int wg_socket_endpoint_from_skb(struct endpoint *endpoint,
const struct sk_buff *skb)
{
memset(endpoint, 0, sizeof(*endpoint));
if (skb->protocol == htons(ETH_P_IP)) {
endpoint->addr4.sin_family = AF_INET;
endpoint->addr4.sin_port = udp_hdr(skb)->source;
endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
endpoint->src4.s_addr = ip_hdr(skb)->daddr;
endpoint->src_if4 = skb->skb_iif;
} else if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
endpoint->addr6.sin6_family = AF_INET6;
endpoint->addr6.sin6_port = udp_hdr(skb)->source;
endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr;
endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id(
&ipv6_hdr(skb)->saddr, skb->skb_iif);
endpoint->src6 = ipv6_hdr(skb)->daddr;
} else {
return -EINVAL;
}
return 0;
}
static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b)
{
return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET &&
a->addr4.sin_port == b->addr4.sin_port &&
a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr &&
a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) ||
(a->addr.sa_family == AF_INET6 &&
b->addr.sa_family == AF_INET6 &&
a->addr6.sin6_port == b->addr6.sin6_port &&
ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) &&
a->addr6.sin6_scope_id == b->addr6.sin6_scope_id &&
ipv6_addr_equal(&a->src6, &b->src6)) ||
unlikely(!a->addr.sa_family && !b->addr.sa_family);
}
void wg_socket_set_peer_endpoint(struct wg_peer *peer,
const struct endpoint *endpoint)
{
/* First we check unlocked, in order to optimize, since it's pretty rare
* that an endpoint will change. If we happen to be mid-write, and two
* CPUs wind up writing the same thing or something slightly different,
* it doesn't really matter much either.
*/
if (endpoint_eq(endpoint, &peer->endpoint))
return;
write_lock_bh(&peer->endpoint_lock);
if (endpoint->addr.sa_family == AF_INET) {
peer->endpoint.addr4 = endpoint->addr4;
peer->endpoint.src4 = endpoint->src4;
peer->endpoint.src_if4 = endpoint->src_if4;
} else if (IS_ENABLED(CONFIG_IPV6) && endpoint->addr.sa_family == AF_INET6) {
peer->endpoint.addr6 = endpoint->addr6;
peer->endpoint.src6 = endpoint->src6;
} else {
goto out;
}
dst_cache_reset(&peer->endpoint_cache);
out:
write_unlock_bh(&peer->endpoint_lock);
}
void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer,
const struct sk_buff *skb)
{
struct endpoint endpoint;
if (!wg_socket_endpoint_from_skb(&endpoint, skb))
wg_socket_set_peer_endpoint(peer, &endpoint);
}
void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
dst_cache_reset_now(&peer->endpoint_cache);
write_unlock_bh(&peer->endpoint_lock);
}
static int wg_receive(struct sock *sk, struct sk_buff *skb)
{
struct wg_device *wg;
if (unlikely(!sk))
goto err;
wg = sk->sk_user_data;
if (unlikely(!wg))
goto err;
skb_mark_not_on_list(skb);
wg_packet_receive(wg, skb);
return 0;
err:
kfree_skb(skb);
return 0;
}
static void sock_free(struct sock *sock)
{
if (unlikely(!sock))
return;
sk_clear_memalloc(sock);
udp_tunnel_sock_release(sock->sk_socket);
}
static void set_sock_opts(struct socket *sock)
{
sock->sk->sk_allocation = GFP_ATOMIC;
sock->sk->sk_sndbuf = INT_MAX;
sk_set_memalloc(sock->sk);
}
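/* Creates the UDP transport sockets: an IPv4 socket bound to the requested
 * port (or an ephemeral one if port == 0) and, when IPv6 is available, a
 * v6-only socket bound to the same port number. If the IPv6 bind loses the
 * race for that port, both sockets are torn down and, for ephemeral ports,
 * the whole sequence is retried a bounded number of times.
 */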
int wg_socket_init(struct wg_device *wg, u16 port)
{
struct net *net;
int ret;
struct udp_tunnel_sock_cfg cfg = {
.sk_user_data = wg,
.encap_type = 1,
.encap_rcv = wg_receive
};
struct socket *new4 = NULL, *new6 = NULL;
struct udp_port_cfg port4 = {
.family = AF_INET,
.local_ip.s_addr = htonl(INADDR_ANY),
.local_udp_port = htons(port),
.use_udp_checksums = true
};
#if IS_ENABLED(CONFIG_IPV6)
int retries = 0;
struct udp_port_cfg port6 = {
.family = AF_INET6,
.local_ip6 = IN6ADDR_ANY_INIT,
.use_udp6_tx_checksums = true,
.use_udp6_rx_checksums = true,
.ipv6_v6only = true
};
#endif
rcu_read_lock();
net = rcu_dereference(wg->creating_net);
net = net ? maybe_get_net(net) : NULL;
rcu_read_unlock();
if (unlikely(!net))
return -ENONET;
#if IS_ENABLED(CONFIG_IPV6)
retry:
#endif
ret = udp_sock_create(net, &port4, &new4);
if (ret < 0) {
pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
goto out;
}
set_sock_opts(new4);
setup_udp_tunnel_sock(net, new4, &cfg);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6_mod_enabled()) {
port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
ret = udp_sock_create(net, &port6, &new6);
if (ret < 0) {
udp_tunnel_sock_release(new4);
if (ret == -EADDRINUSE && !port && retries++ < 100)
goto retry;
pr_err("%s: Could not create IPv6 socket\n",
wg->dev->name);
goto out;
}
set_sock_opts(new6);
setup_udp_tunnel_sock(net, new6, &cfg);
}
#endif
wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
ret = 0;
out:
put_net(net);
return ret;
}
void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
struct sock *new6)
{
struct sock *old4, *old6;
mutex_lock(&wg->socket_update_lock);
old4 = rcu_dereference_protected(wg->sock4,
lockdep_is_held(&wg->socket_update_lock));
old6 = rcu_dereference_protected(wg->sock6,
lockdep_is_held(&wg->socket_update_lock));
rcu_assign_pointer(wg->sock4, new4);
rcu_assign_pointer(wg->sock6, new6);
if (new4)
wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
mutex_unlock(&wg->socket_update_lock);
synchronize_net();
sock_free(old4);
sock_free(old6);
}
| linux-master | drivers/net/wireguard/socket.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "noise.h"
#include "device.h"
#include "peer.h"
#include "messages.h"
#include "queueing.h"
#include "peerlookup.h"
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <crypto/algapi.h>
/* This implements Noise_IKpsk2:
*
* <- s
* ******
* -> e, es, s, ss, {t}
* <- e, ee, se, psk, {}
*/
static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s";
static const u8 identifier_name[34] = "WireGuard v1 zx2c4 [email protected]";
static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init;
static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init;
static atomic64_t keypair_counter = ATOMIC64_INIT(0);
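/* Precomputes the protocol-wide initial values used by every handshake: the
 * initial chaining key is BLAKE2s(handshake_name), and the initial hash is
 * BLAKE2s of that chaining key concatenated with the identifier string.
 */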
void __init wg_noise_init(void)
{
struct blake2s_state blake;
blake2s(handshake_init_chaining_key, handshake_name, NULL,
NOISE_HASH_LEN, sizeof(handshake_name), 0);
blake2s_init(&blake, NOISE_HASH_LEN);
blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN);
blake2s_update(&blake, identifier_name, sizeof(identifier_name));
blake2s_final(&blake, handshake_init_hash);
}
/* Must hold peer->handshake.static_identity->lock */
void wg_noise_precompute_static_static(struct wg_peer *peer)
{
down_write(&peer->handshake.lock);
if (!peer->handshake.static_identity->has_identity ||
!curve25519(peer->handshake.precomputed_static_static,
peer->handshake.static_identity->static_private,
peer->handshake.remote_static))
memset(peer->handshake.precomputed_static_static, 0,
NOISE_PUBLIC_KEY_LEN);
up_write(&peer->handshake.lock);
}
void wg_noise_handshake_init(struct noise_handshake *handshake,
struct noise_static_identity *static_identity,
const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN],
const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN],
struct wg_peer *peer)
{
memset(handshake, 0, sizeof(*handshake));
init_rwsem(&handshake->lock);
handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
handshake->entry.peer = peer;
memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN);
if (peer_preshared_key)
memcpy(handshake->preshared_key, peer_preshared_key,
NOISE_SYMMETRIC_KEY_LEN);
handshake->static_identity = static_identity;
handshake->state = HANDSHAKE_ZEROED;
wg_noise_precompute_static_static(peer);
}
static void handshake_zero(struct noise_handshake *handshake)
{
memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN);
memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN);
memset(&handshake->hash, 0, NOISE_HASH_LEN);
memset(&handshake->chaining_key, 0, NOISE_HASH_LEN);
handshake->remote_index = 0;
handshake->state = HANDSHAKE_ZEROED;
}
void wg_noise_handshake_clear(struct noise_handshake *handshake)
{
down_write(&handshake->lock);
wg_index_hashtable_remove(
handshake->entry.peer->device->index_hashtable,
&handshake->entry);
handshake_zero(handshake);
up_write(&handshake->lock);
}
static struct noise_keypair *keypair_create(struct wg_peer *peer)
{
struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL);
if (unlikely(!keypair))
return NULL;
spin_lock_init(&keypair->receiving_counter.lock);
keypair->internal_id = atomic64_inc_return(&keypair_counter);
keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
keypair->entry.peer = peer;
kref_init(&keypair->refcount);
return keypair;
}
static void keypair_free_rcu(struct rcu_head *rcu)
{
kfree_sensitive(container_of(rcu, struct noise_keypair, rcu));
}
static void keypair_free_kref(struct kref *kref)
{
struct noise_keypair *keypair =
container_of(kref, struct noise_keypair, refcount);
net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n",
keypair->entry.peer->device->dev->name,
keypair->internal_id,
keypair->entry.peer->internal_id);
wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable,
&keypair->entry);
call_rcu(&keypair->rcu, keypair_free_rcu);
}
void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now)
{
if (unlikely(!keypair))
return;
if (unlikely(unreference_now))
wg_index_hashtable_remove(
keypair->entry.peer->device->index_hashtable,
&keypair->entry);
kref_put(&keypair->refcount, keypair_free_kref);
}
struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
"Taking noise keypair reference without holding the RCU BH read lock");
if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount)))
return NULL;
return keypair;
}
void wg_noise_keypairs_clear(struct noise_keypairs *keypairs)
{
struct noise_keypair *old;
spin_lock_bh(&keypairs->keypair_update_lock);
/* We zero the next_keypair before zeroing the others, so that
* wg_noise_received_with_keypair returns early before subsequent ones
* are zeroed.
*/
old = rcu_dereference_protected(keypairs->next_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
wg_noise_keypair_put(old, true);
old = rcu_dereference_protected(keypairs->previous_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
wg_noise_keypair_put(old, true);
old = rcu_dereference_protected(keypairs->current_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
RCU_INIT_POINTER(keypairs->current_keypair, NULL);
wg_noise_keypair_put(old, true);
spin_unlock_bh(&keypairs->keypair_update_lock);
}
void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer)
{
struct noise_keypair *keypair;
wg_noise_handshake_clear(&peer->handshake);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
spin_lock_bh(&peer->keypairs.keypair_update_lock);
keypair = rcu_dereference_protected(peer->keypairs.next_keypair,
lockdep_is_held(&peer->keypairs.keypair_update_lock));
if (keypair)
keypair->sending.is_valid = false;
keypair = rcu_dereference_protected(peer->keypairs.current_keypair,
lockdep_is_held(&peer->keypairs.keypair_update_lock));
if (keypair)
keypair->sending.is_valid = false;
spin_unlock_bh(&peer->keypairs.keypair_update_lock);
}
static void add_new_keypair(struct noise_keypairs *keypairs,
struct noise_keypair *new_keypair)
{
struct noise_keypair *previous_keypair, *next_keypair, *current_keypair;
spin_lock_bh(&keypairs->keypair_update_lock);
previous_keypair = rcu_dereference_protected(keypairs->previous_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
next_keypair = rcu_dereference_protected(keypairs->next_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
current_keypair = rcu_dereference_protected(keypairs->current_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
if (new_keypair->i_am_the_initiator) {
/* If we're the initiator, it means we've sent a handshake, and
* received a confirmation response, which means this new
* keypair can now be used.
*/
if (next_keypair) {
/* If there already was a next keypair pending, we
* demote it to be the previous keypair, and free the
* existing current. Note that this means KCI can result
* in this transition. It would perhaps be more sound to
* always just get rid of the unused next keypair
* instead of putting it in the previous slot, but this
* might be a bit less robust. Something to think about
* for the future.
*/
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
rcu_assign_pointer(keypairs->previous_keypair,
next_keypair);
wg_noise_keypair_put(current_keypair, true);
} else /* If there wasn't an existing next keypair, we replace
* the previous with the current one.
*/
rcu_assign_pointer(keypairs->previous_keypair,
current_keypair);
/* At this point we can get rid of the old previous keypair, and
* set up the new keypair.
*/
wg_noise_keypair_put(previous_keypair, true);
rcu_assign_pointer(keypairs->current_keypair, new_keypair);
} else {
/* If we're the responder, it means we can't use the new keypair
* until we receive confirmation via the first data packet, so
* we get rid of the existing previous one, the possibly
* existing next one, and slide in the new next one.
*/
rcu_assign_pointer(keypairs->next_keypair, new_keypair);
wg_noise_keypair_put(next_keypair, true);
RCU_INIT_POINTER(keypairs->previous_keypair, NULL);
wg_noise_keypair_put(previous_keypair, true);
}
spin_unlock_bh(&keypairs->keypair_update_lock);
}
bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs,
struct noise_keypair *received_keypair)
{
struct noise_keypair *old_keypair;
bool key_is_new;
/* We first check without taking the spinlock. */
key_is_new = received_keypair ==
rcu_access_pointer(keypairs->next_keypair);
if (likely(!key_is_new))
return false;
spin_lock_bh(&keypairs->keypair_update_lock);
/* After locking, we double check that things didn't change from
* beneath us.
*/
if (unlikely(received_keypair !=
rcu_dereference_protected(keypairs->next_keypair,
lockdep_is_held(&keypairs->keypair_update_lock)))) {
spin_unlock_bh(&keypairs->keypair_update_lock);
return false;
}
/* When we've finally received the confirmation, we slide the next
* into the current, the current into the previous, and get rid of
* the old previous.
*/
old_keypair = rcu_dereference_protected(keypairs->previous_keypair,
lockdep_is_held(&keypairs->keypair_update_lock));
rcu_assign_pointer(keypairs->previous_keypair,
rcu_dereference_protected(keypairs->current_keypair,
lockdep_is_held(&keypairs->keypair_update_lock)));
wg_noise_keypair_put(old_keypair, true);
rcu_assign_pointer(keypairs->current_keypair, received_keypair);
RCU_INIT_POINTER(keypairs->next_keypair, NULL);
spin_unlock_bh(&keypairs->keypair_update_lock);
return true;
}
/* Must hold static_identity->lock */
void wg_noise_set_static_identity_private_key(
struct noise_static_identity *static_identity,
const u8 private_key[NOISE_PUBLIC_KEY_LEN])
{
memcpy(static_identity->static_private, private_key,
NOISE_PUBLIC_KEY_LEN);
curve25519_clamp_secret(static_identity->static_private);
static_identity->has_identity = curve25519_generate_public(
static_identity->static_public, private_key);
}
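/* HMAC (RFC 2104) instantiated with BLAKE2s as the underlying hash: the key
 * is zero-padded (or pre-hashed if longer than one block), XORed with the
 * ipad/opad constants 0x36/0x5c, and two nested hashes are computed.
 */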
static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
{
struct blake2s_state state;
u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
int i;
if (keylen > BLAKE2S_BLOCK_SIZE) {
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, key, keylen);
blake2s_final(&state, x_key);
} else
memcpy(x_key, key, keylen);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, in, inlen);
blake2s_final(&state, i_hash);
for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
x_key[i] ^= 0x5c ^ 0x36;
blake2s_init(&state, BLAKE2S_HASH_SIZE);
blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
blake2s_final(&state, i_hash);
memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
}
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
*/
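/* In RFC 5869 terms: the first hmac() call below is HKDF-Extract, with
 * chaining_key as the salt and 'data' as the input keying material; the up to
 * three expansions that follow are the HKDF-Expand output blocks T(1), T(2)
 * and T(3), each at most one BLAKE2s hash in length.
 */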
static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
size_t first_len, size_t second_len, size_t third_len,
size_t data_len, const u8 chaining_key[NOISE_HASH_LEN])
{
u8 output[BLAKE2S_HASH_SIZE + 1];
u8 secret[BLAKE2S_HASH_SIZE];
WARN_ON(IS_ENABLED(DEBUG) &&
(first_len > BLAKE2S_HASH_SIZE ||
second_len > BLAKE2S_HASH_SIZE ||
third_len > BLAKE2S_HASH_SIZE ||
((second_len || second_dst || third_len || third_dst) &&
(!first_len || !first_dst)) ||
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
goto out;
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
goto out;
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
/* Clear sensitive data from stack */
memzero_explicit(secret, BLAKE2S_HASH_SIZE);
memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
}
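/* Derives the two transport keys from the final chaining key. Which slot ends
 * up as the sending key and which as the receiving key is decided by the
 * caller according to whether it was the handshake initiator (see
 * wg_noise_handshake_begin_session()).
 */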
static void derive_keys(struct noise_symmetric_key *first_dst,
struct noise_symmetric_key *second_dst,
const u8 chaining_key[NOISE_HASH_LEN])
{
u64 birthdate = ktime_get_coarse_boottime_ns();
kdf(first_dst->key, second_dst->key, NULL, NULL,
NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
chaining_key);
first_dst->birthdate = second_dst->birthdate = birthdate;
first_dst->is_valid = second_dst->is_valid = true;
}
static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 private[NOISE_PUBLIC_KEY_LEN],
const u8 public[NOISE_PUBLIC_KEY_LEN])
{
u8 dh_calculation[NOISE_PUBLIC_KEY_LEN];
if (unlikely(!curve25519(dh_calculation, private, public)))
return false;
kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN,
NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key);
memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN);
return true;
}
static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN],
u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 precomputed[NOISE_PUBLIC_KEY_LEN])
{
static u8 zero_point[NOISE_PUBLIC_KEY_LEN];
if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN)))
return false;
kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN,
NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN,
chaining_key);
return true;
}
static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len)
{
struct blake2s_state blake;
blake2s_init(&blake, NOISE_HASH_LEN);
blake2s_update(&blake, hash, NOISE_HASH_LEN);
blake2s_update(&blake, src, src_len);
blake2s_final(&blake, hash);
}
static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN],
u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 psk[NOISE_SYMMETRIC_KEY_LEN])
{
u8 temp_hash[NOISE_HASH_LEN];
kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN,
NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key);
mix_hash(hash, temp_hash, NOISE_HASH_LEN);
memzero_explicit(temp_hash, NOISE_HASH_LEN);
}
static void handshake_init(u8 chaining_key[NOISE_HASH_LEN],
u8 hash[NOISE_HASH_LEN],
const u8 remote_static[NOISE_PUBLIC_KEY_LEN])
{
memcpy(hash, handshake_init_hash, NOISE_HASH_LEN);
memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN);
mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN);
}
static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext,
size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
u8 hash[NOISE_HASH_LEN])
{
chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash,
NOISE_HASH_LEN,
0 /* Always zero for Noise_IK */, key);
mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len));
}
static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext,
size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN],
u8 hash[NOISE_HASH_LEN])
{
if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len,
hash, NOISE_HASH_LEN,
0 /* Always zero for Noise_IK */, key))
return false;
mix_hash(hash, src_ciphertext, src_len);
return true;
}
static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN],
const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN],
u8 chaining_key[NOISE_HASH_LEN],
u8 hash[NOISE_HASH_LEN])
{
if (ephemeral_dst != ephemeral_src)
memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN);
kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0,
NOISE_PUBLIC_KEY_LEN, chaining_key);
}
static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN])
{
struct timespec64 now;
ktime_get_real_ts64(&now);
	/* In order to prevent an infoleak from overly precise timers, we round
	 * the nanoseconds part down to a multiple of the largest power of two
	 * not exceeding NSEC_PER_SEC / INITIATIONS_PER_SECOND, the minimum
	 * spacing between initiations that the implementation allows anyway.
	 */
now.tv_nsec = ALIGN_DOWN(now.tv_nsec,
rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND));
/* https://cr.yp.to/libtai/tai64.html */
*(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec);
*(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec);
}
bool
wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst,
struct noise_handshake *handshake)
{
u8 timestamp[NOISE_TIMESTAMP_LEN];
u8 key[NOISE_SYMMETRIC_KEY_LEN];
bool ret = false;
/* We need to wait for crng _before_ taking any locks, since
* curve25519_generate_secret uses get_random_bytes_wait.
*/
wait_for_random_bytes();
down_read(&handshake->static_identity->lock);
down_write(&handshake->lock);
if (unlikely(!handshake->static_identity->has_identity))
goto out;
dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION);
handshake_init(handshake->chaining_key, handshake->hash,
handshake->remote_static);
/* e */
curve25519_generate_secret(handshake->ephemeral_private);
if (!curve25519_generate_public(dst->unencrypted_ephemeral,
handshake->ephemeral_private))
goto out;
message_ephemeral(dst->unencrypted_ephemeral,
dst->unencrypted_ephemeral, handshake->chaining_key,
handshake->hash);
/* es */
if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private,
handshake->remote_static))
goto out;
/* s */
message_encrypt(dst->encrypted_static,
handshake->static_identity->static_public,
NOISE_PUBLIC_KEY_LEN, key, handshake->hash);
/* ss */
if (!mix_precomputed_dh(handshake->chaining_key, key,
handshake->precomputed_static_static))
goto out;
/* {t} */
tai64n_now(timestamp);
message_encrypt(dst->encrypted_timestamp, timestamp,
NOISE_TIMESTAMP_LEN, key, handshake->hash);
dst->sender_index = wg_index_hashtable_insert(
handshake->entry.peer->device->index_hashtable,
&handshake->entry);
handshake->state = HANDSHAKE_CREATED_INITIATION;
ret = true;
out:
up_write(&handshake->lock);
up_read(&handshake->static_identity->lock);
memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
return ret;
}
struct wg_peer *
wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
struct wg_device *wg)
{
struct wg_peer *peer = NULL, *ret_peer = NULL;
struct noise_handshake *handshake;
bool replay_attack, flood_attack;
u8 key[NOISE_SYMMETRIC_KEY_LEN];
u8 chaining_key[NOISE_HASH_LEN];
u8 hash[NOISE_HASH_LEN];
u8 s[NOISE_PUBLIC_KEY_LEN];
u8 e[NOISE_PUBLIC_KEY_LEN];
u8 t[NOISE_TIMESTAMP_LEN];
u64 initiation_consumption;
down_read(&wg->static_identity.lock);
if (unlikely(!wg->static_identity.has_identity))
goto out;
handshake_init(chaining_key, hash, wg->static_identity.static_public);
/* e */
message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
/* es */
if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e))
goto out;
/* s */
if (!message_decrypt(s, src->encrypted_static,
sizeof(src->encrypted_static), key, hash))
goto out;
/* Lookup which peer we're actually talking to */
peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s);
if (!peer)
goto out;
handshake = &peer->handshake;
/* ss */
if (!mix_precomputed_dh(chaining_key, key,
handshake->precomputed_static_static))
goto out;
/* {t} */
if (!message_decrypt(t, src->encrypted_timestamp,
sizeof(src->encrypted_timestamp), key, hash))
goto out;
down_read(&handshake->lock);
replay_attack = memcmp(t, handshake->latest_timestamp,
NOISE_TIMESTAMP_LEN) <= 0;
flood_attack = (s64)handshake->last_initiation_consumption +
NSEC_PER_SEC / INITIATIONS_PER_SECOND >
(s64)ktime_get_coarse_boottime_ns();
up_read(&handshake->lock);
if (replay_attack || flood_attack)
goto out;
/* Success! Copy everything to peer */
down_write(&handshake->lock);
memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0)
memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN);
memcpy(handshake->hash, hash, NOISE_HASH_LEN);
memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
handshake->remote_index = src->sender_index;
initiation_consumption = ktime_get_coarse_boottime_ns();
if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
handshake->last_initiation_consumption = initiation_consumption;
handshake->state = HANDSHAKE_CONSUMED_INITIATION;
up_write(&handshake->lock);
ret_peer = peer;
out:
memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
memzero_explicit(hash, NOISE_HASH_LEN);
memzero_explicit(chaining_key, NOISE_HASH_LEN);
up_read(&wg->static_identity.lock);
if (!ret_peer)
wg_peer_put(peer);
return ret_peer;
}
bool wg_noise_handshake_create_response(struct message_handshake_response *dst,
struct noise_handshake *handshake)
{
u8 key[NOISE_SYMMETRIC_KEY_LEN];
bool ret = false;
/* We need to wait for crng _before_ taking any locks, since
* curve25519_generate_secret uses get_random_bytes_wait.
*/
wait_for_random_bytes();
down_read(&handshake->static_identity->lock);
down_write(&handshake->lock);
if (handshake->state != HANDSHAKE_CONSUMED_INITIATION)
goto out;
dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE);
dst->receiver_index = handshake->remote_index;
/* e */
curve25519_generate_secret(handshake->ephemeral_private);
if (!curve25519_generate_public(dst->unencrypted_ephemeral,
handshake->ephemeral_private))
goto out;
message_ephemeral(dst->unencrypted_ephemeral,
dst->unencrypted_ephemeral, handshake->chaining_key,
handshake->hash);
/* ee */
if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
handshake->remote_ephemeral))
goto out;
/* se */
if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private,
handshake->remote_static))
goto out;
/* psk */
mix_psk(handshake->chaining_key, handshake->hash, key,
handshake->preshared_key);
/* {} */
message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash);
dst->sender_index = wg_index_hashtable_insert(
handshake->entry.peer->device->index_hashtable,
&handshake->entry);
handshake->state = HANDSHAKE_CREATED_RESPONSE;
ret = true;
out:
up_write(&handshake->lock);
up_read(&handshake->static_identity->lock);
memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
return ret;
}
struct wg_peer *
wg_noise_handshake_consume_response(struct message_handshake_response *src,
struct wg_device *wg)
{
enum noise_handshake_state state = HANDSHAKE_ZEROED;
struct wg_peer *peer = NULL, *ret_peer = NULL;
struct noise_handshake *handshake;
u8 key[NOISE_SYMMETRIC_KEY_LEN];
u8 hash[NOISE_HASH_LEN];
u8 chaining_key[NOISE_HASH_LEN];
u8 e[NOISE_PUBLIC_KEY_LEN];
u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
u8 static_private[NOISE_PUBLIC_KEY_LEN];
u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
down_read(&wg->static_identity.lock);
if (unlikely(!wg->static_identity.has_identity))
goto out;
handshake = (struct noise_handshake *)wg_index_hashtable_lookup(
wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE,
src->receiver_index, &peer);
if (unlikely(!handshake))
goto out;
down_read(&handshake->lock);
state = handshake->state;
memcpy(hash, handshake->hash, NOISE_HASH_LEN);
memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
memcpy(ephemeral_private, handshake->ephemeral_private,
NOISE_PUBLIC_KEY_LEN);
memcpy(preshared_key, handshake->preshared_key,
NOISE_SYMMETRIC_KEY_LEN);
up_read(&handshake->lock);
if (state != HANDSHAKE_CREATED_INITIATION)
goto fail;
/* e */
message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash);
/* ee */
if (!mix_dh(chaining_key, NULL, ephemeral_private, e))
goto fail;
/* se */
if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e))
goto fail;
/* psk */
mix_psk(chaining_key, hash, key, preshared_key);
/* {} */
if (!message_decrypt(NULL, src->encrypted_nothing,
sizeof(src->encrypted_nothing), key, hash))
goto fail;
/* Success! Copy everything to peer */
down_write(&handshake->lock);
/* It's important to check that the state is still the same, while we
* have an exclusive lock.
*/
if (handshake->state != state) {
up_write(&handshake->lock);
goto fail;
}
memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN);
memcpy(handshake->hash, hash, NOISE_HASH_LEN);
memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
handshake->remote_index = src->sender_index;
handshake->state = HANDSHAKE_CONSUMED_RESPONSE;
up_write(&handshake->lock);
ret_peer = peer;
goto out;
fail:
wg_peer_put(peer);
out:
memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN);
memzero_explicit(hash, NOISE_HASH_LEN);
memzero_explicit(chaining_key, NOISE_HASH_LEN);
memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
up_read(&wg->static_identity.lock);
return ret_peer;
}
bool wg_noise_handshake_begin_session(struct noise_handshake *handshake,
struct noise_keypairs *keypairs)
{
struct noise_keypair *new_keypair;
bool ret = false;
down_write(&handshake->lock);
if (handshake->state != HANDSHAKE_CREATED_RESPONSE &&
handshake->state != HANDSHAKE_CONSUMED_RESPONSE)
goto out;
new_keypair = keypair_create(handshake->entry.peer);
if (!new_keypair)
goto out;
new_keypair->i_am_the_initiator = handshake->state ==
HANDSHAKE_CONSUMED_RESPONSE;
new_keypair->remote_index = handshake->remote_index;
if (new_keypair->i_am_the_initiator)
derive_keys(&new_keypair->sending, &new_keypair->receiving,
handshake->chaining_key);
else
derive_keys(&new_keypair->receiving, &new_keypair->sending,
handshake->chaining_key);
handshake_zero(handshake);
rcu_read_lock_bh();
if (likely(!READ_ONCE(container_of(handshake, struct wg_peer,
handshake)->is_dead))) {
add_new_keypair(keypairs, new_keypair);
net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n",
handshake->entry.peer->device->dev->name,
new_keypair->internal_id,
handshake->entry.peer->internal_id);
ret = wg_index_hashtable_replace(
handshake->entry.peer->device->index_hashtable,
&handshake->entry, &new_keypair->entry);
} else {
kfree_sensitive(new_keypair);
}
rcu_read_unlock_bh();
out:
up_write(&handshake->lock);
return ret;
}
| linux-master | drivers/net/wireguard/noise.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "peer.h"
#include "device.h"
#include "queueing.h"
#include "timers.h"
#include "peerlookup.h"
#include "noise.h"
#include <linux/kref.h>
#include <linux/lockdep.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);
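/* Allocates and fully initializes a new peer under the device update lock,
 * adding it to the device's peer list and public-key hashtable. Returns an
 * ERR_PTR on allocation failure or once MAX_PEERS_PER_DEVICE is reached.
 */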
struct wg_peer *wg_peer_create(struct wg_device *wg,
const u8 public_key[NOISE_PUBLIC_KEY_LEN],
const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN])
{
struct wg_peer *peer;
int ret = -ENOMEM;
lockdep_assert_held(&wg->device_update_lock);
if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
return ERR_PTR(ret);
peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
goto err;
peer->device = wg;
wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
public_key, preshared_key, peer);
peer->internal_id = atomic64_inc_return(&peer_counter);
peer->serial_work_cpu = nr_cpumask_bits;
wg_cookie_init(&peer->latest_cookie);
wg_timers_init(peer);
wg_cookie_checker_precompute_peer_keys(peer);
spin_lock_init(&peer->keypairs.keypair_update_lock);
INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
wg_prev_queue_init(&peer->tx_queue);
wg_prev_queue_init(&peer->rx_queue);
rwlock_init(&peer->endpoint_lock);
kref_init(&peer->refcount);
skb_queue_head_init(&peer->staged_packet_queue);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state);
netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll);
napi_enable(&peer->napi);
list_add_tail(&peer->peer_list, &wg->peer_list);
INIT_LIST_HEAD(&peer->allowedips_list);
wg_pubkey_hashtable_add(wg->peer_hashtable, peer);
++wg->num_peers;
pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
return peer;
err:
kmem_cache_free(peer_cache, peer);
return ERR_PTR(ret);
}
struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer)
{
RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
"Taking peer reference without holding the RCU read lock");
if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount)))
return NULL;
return peer;
}
static void peer_make_dead(struct wg_peer *peer)
{
/* Remove from configuration-time lookup structures. */
list_del_init(&peer->peer_list);
wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer,
&peer->device->device_update_lock);
wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer);
/* Mark as dead, so that we don't allow jumping contexts after. */
WRITE_ONCE(peer->is_dead, true);
/* The caller must now synchronize_net() for this to take effect. */
}
static void peer_remove_after_dead(struct wg_peer *peer)
{
WARN_ON(!peer->is_dead);
/* No more keypairs can be created for this peer, since is_dead protects
* add_new_keypair, so we can now destroy existing ones.
*/
wg_noise_keypairs_clear(&peer->keypairs);
/* Destroy all ongoing timers that were in-flight at the beginning of
* this function.
*/
wg_timers_stop(peer);
/* The transition between packet encryption/decryption queues isn't
* guarded by is_dead, but each reference's life is strictly bounded by
* two generations: once for parallel crypto and once for serial
* ingestion, so we can simply flush twice, and be sure that we no
* longer have references inside these queues.
*/
/* a) For encrypt/decrypt. */
flush_workqueue(peer->device->packet_crypt_wq);
/* b.1) For send (but not receive, since that's napi). */
flush_workqueue(peer->device->packet_crypt_wq);
/* b.2.1) For receive (but not send, since that's wq). */
napi_disable(&peer->napi);
	/* b.2.2) It's now safe to remove the napi struct, which must be done
* here from process context.
*/
netif_napi_del(&peer->napi);
/* Ensure any workstructs we own (like transmit_handshake_work or
* clear_peer_work) no longer are in use.
*/
flush_workqueue(peer->device->handshake_send_wq);
/* After the above flushes, a peer might still be active in a few
* different contexts: 1) from xmit(), before hitting is_dead and
* returning, 2) from wg_packet_consume_data(), before hitting is_dead
* and returning, 3) from wg_receive_handshake_packet() after a point
* where it has processed an incoming handshake packet, but where
	 * all calls to pass it off to timers fail because of is_dead. We won't
* have new references in (1) eventually, because we're removed from
* allowedips; we won't have new references in (2) eventually, because
* wg_index_hashtable_lookup will always return NULL, since we removed
* all existing keypairs and no more can be created; we won't have new
* references in (3) eventually, because we're removed from the pubkey
* hash table, which allows for a maximum of one handshake response,
* via the still-uncleared index hashtable entry, but not more than one,
* and in wg_cookie_message_consume, the lookup eventually gets a peer
* with a refcount of zero, so no new reference is taken.
*/
--peer->device->num_peers;
wg_peer_put(peer);
}
/* We have a separate "remove" function to make sure that all active places where
* a peer is currently operating will eventually come to an end and not pass
* their reference onto another context.
*/
void wg_peer_remove(struct wg_peer *peer)
{
if (unlikely(!peer))
return;
lockdep_assert_held(&peer->device->device_update_lock);
peer_make_dead(peer);
synchronize_net();
peer_remove_after_dead(peer);
}
void wg_peer_remove_all(struct wg_device *wg)
{
struct wg_peer *peer, *temp;
LIST_HEAD(dead_peers);
lockdep_assert_held(&wg->device_update_lock);
/* Avoid having to traverse individually for each one. */
wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock);
list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
peer_make_dead(peer);
list_add_tail(&peer->peer_list, &dead_peers);
}
synchronize_net();
list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
peer_remove_after_dead(peer);
}
static void rcu_release(struct rcu_head *rcu)
{
struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
dst_cache_destroy(&peer->endpoint_cache);
WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
*/
memzero_explicit(peer, sizeof(*peer));
kmem_cache_free(peer_cache, peer);
}
static void kref_release(struct kref *refcount)
{
struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount);
pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr);
	/* Remove ourselves from dynamic runtime lookup structures, now that the
* last reference is gone.
*/
wg_index_hashtable_remove(peer->device->index_hashtable,
&peer->handshake.entry);
/* Remove any lingering packets that didn't have a chance to be
* transmitted.
*/
wg_packet_purge_staged_packets(peer);
/* Free the memory used. */
call_rcu(&peer->rcu, rcu_release);
}
void wg_peer_put(struct wg_peer *peer)
{
if (unlikely(!peer))
return;
kref_put(&peer->refcount, kref_release);
}
int __init wg_peer_init(void)
{
peer_cache = KMEM_CACHE(wg_peer, 0);
return peer_cache ? 0 : -ENOMEM;
}
void wg_peer_uninit(void)
{
kmem_cache_destroy(peer_cache);
}
| linux-master | drivers/net/wireguard/peer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "version.h"
#include "device.h"
#include "noise.h"
#include "queueing.h"
#include "ratelimiter.h"
#include "netlink.h"
#include <uapi/linux/wireguard.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/genetlink.h>
#include <net/rtnetlink.h>
static int __init wg_mod_init(void)
{
int ret;
ret = wg_allowedips_slab_init();
if (ret < 0)
goto err_allowedips;
#ifdef DEBUG
ret = -ENOTRECOVERABLE;
if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
!wg_ratelimiter_selftest())
goto err_peer;
#endif
wg_noise_init();
ret = wg_peer_init();
if (ret < 0)
goto err_peer;
ret = wg_device_init();
if (ret < 0)
goto err_device;
ret = wg_genetlink_init();
if (ret < 0)
goto err_netlink;
pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n");
pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.\n");
return 0;
err_netlink:
wg_device_uninit();
err_device:
wg_peer_uninit();
err_peer:
wg_allowedips_slab_uninit();
err_allowedips:
return ret;
}
static void __exit wg_mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
wg_peer_uninit();
wg_allowedips_slab_uninit();
}
module_init(wg_mod_init);
module_exit(wg_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WireGuard secure network tunnel");
MODULE_AUTHOR("Jason A. Donenfeld <[email protected]>");
MODULE_VERSION(WIREGUARD_VERSION);
MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME);
MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME);
| linux-master | drivers/net/wireguard/main.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "queueing.h"
#include "socket.h"
/*
* - Timer for retransmitting the handshake if we don't hear back after
* `REKEY_TIMEOUT + jitter` ms.
*
 * - Timer for sending an empty packet if we have received a packet but have
 *   not sent one afterwards for `KEEPALIVE_TIMEOUT` ms.
 *
 * - Timer for initiating a new handshake if we have sent a packet but have
 *   not received one (even an empty one) afterwards for
 *   `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + jitter` ms.
*
* - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms
* if no new keys have been received.
*
 * - Timer for, if enabled, sending an empty authenticated packet every
 *   user-specified number of seconds.
*/
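/* Arms a per-peer timer only while the interface is running and the peer has
 * not been marked dead, so that timers cannot be (re)armed during teardown.
 */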
static inline void mod_peer_timer(struct wg_peer *peer,
struct timer_list *timer,
unsigned long expires)
{
rcu_read_lock_bh();
if (likely(netif_running(peer->device->dev) &&
!READ_ONCE(peer->is_dead)))
mod_timer(timer, expires);
rcu_read_unlock_bh();
}
static void wg_expired_retransmit_handshake(struct timer_list *timer)
{
struct wg_peer *peer = from_timer(peer, timer,
timer_retransmit_handshake);
if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) {
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, (int)MAX_TIMER_HANDSHAKES + 2);
del_timer(&peer->timer_send_keepalive);
		/* If we have been trying unsuccessfully for too long to complete
		 * a handshake, we drop the staged packets, which have no keypair,
		 * and don't try again.
		 */
wg_packet_purge_staged_packets(peer);
/* We set a timer for destroying any residue that might be left
* of a partial exchange.
*/
if (!timer_pending(&peer->timer_zero_key_material))
mod_peer_timer(peer, &peer->timer_zero_key_material,
jiffies + REJECT_AFTER_TIME * 3 * HZ);
} else {
++peer->timer_handshake_attempts;
pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, (int)REKEY_TIMEOUT,
peer->timer_handshake_attempts + 1);
		/* We clear the endpoint's source address, in case this is
		 * the cause of trouble.
		 */
wg_socket_clear_peer_endpoint_src(peer);
wg_packet_send_queued_handshake_initiation(peer, true);
}
}
static void wg_expired_send_keepalive(struct timer_list *timer)
{
struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive);
wg_packet_send_keepalive(peer);
if (peer->timer_need_another_keepalive) {
peer->timer_need_another_keepalive = false;
mod_peer_timer(peer, &peer->timer_send_keepalive,
jiffies + KEEPALIVE_TIMEOUT * HZ);
}
}
static void wg_expired_new_handshake(struct timer_list *timer)
{
struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake);
pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, (int)(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT));
	/* We clear the endpoint's source address, in case this is the cause
	 * of trouble.
	 */
wg_socket_clear_peer_endpoint_src(peer);
wg_packet_send_queued_handshake_initiation(peer, false);
}
static void wg_expired_zero_key_material(struct timer_list *timer)
{
struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material);
rcu_read_lock_bh();
if (!READ_ONCE(peer->is_dead)) {
wg_peer_get(peer);
if (!queue_work(peer->device->handshake_send_wq,
&peer->clear_peer_work))
/* If the work was already on the queue, we want to drop
* the extra reference.
*/
wg_peer_put(peer);
}
rcu_read_unlock_bh();
}
static void wg_queued_expired_zero_key_material(struct work_struct *work)
{
struct wg_peer *peer = container_of(work, struct wg_peer,
clear_peer_work);
pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n",
peer->device->dev->name, peer->internal_id,
&peer->endpoint.addr, (int)REJECT_AFTER_TIME * 3);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
wg_peer_put(peer);
}
static void wg_expired_send_persistent_keepalive(struct timer_list *timer)
{
struct wg_peer *peer = from_timer(peer, timer,
timer_persistent_keepalive);
if (likely(peer->persistent_keepalive_interval))
wg_packet_send_keepalive(peer);
}
/* Should be called after an authenticated data packet is sent. */
void wg_timers_data_sent(struct wg_peer *peer)
{
if (!timer_pending(&peer->timer_new_handshake))
mod_peer_timer(peer, &peer->timer_new_handshake,
jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ +
get_random_u32_below(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
}
/* Should be called after an authenticated data packet is received. */
void wg_timers_data_received(struct wg_peer *peer)
{
if (likely(netif_running(peer->device->dev))) {
if (!timer_pending(&peer->timer_send_keepalive))
mod_peer_timer(peer, &peer->timer_send_keepalive,
jiffies + KEEPALIVE_TIMEOUT * HZ);
else
peer->timer_need_another_keepalive = true;
}
}
/* Should be called after any type of authenticated packet is sent, whether
* keepalive, data, or handshake.
*/
void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer)
{
del_timer(&peer->timer_send_keepalive);
}
/* Should be called after any type of authenticated packet is received, whether
* keepalive, data, or handshake.
*/
void wg_timers_any_authenticated_packet_received(struct wg_peer *peer)
{
del_timer(&peer->timer_new_handshake);
}
/* Should be called after a handshake initiation message is sent. */
void wg_timers_handshake_initiated(struct wg_peer *peer)
{
mod_peer_timer(peer, &peer->timer_retransmit_handshake,
jiffies + REKEY_TIMEOUT * HZ +
get_random_u32_below(REKEY_TIMEOUT_JITTER_MAX_JIFFIES));
}
/* Should be called after a handshake response message is received and processed
* or when getting key confirmation via the first data message.
*/
void wg_timers_handshake_complete(struct wg_peer *peer)
{
del_timer(&peer->timer_retransmit_handshake);
peer->timer_handshake_attempts = 0;
peer->sent_lastminute_handshake = false;
ktime_get_real_ts64(&peer->walltime_last_handshake);
}
/* Should be called after an ephemeral key is created, which is before sending a
* handshake response or after receiving a handshake response.
*/
void wg_timers_session_derived(struct wg_peer *peer)
{
mod_peer_timer(peer, &peer->timer_zero_key_material,
jiffies + REJECT_AFTER_TIME * 3 * HZ);
}
/* Should be called before a packet with authentication, whether
 * keepalive, data, or handshake, is sent, or after one is received.
*/
void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer)
{
if (peer->persistent_keepalive_interval)
mod_peer_timer(peer, &peer->timer_persistent_keepalive,
jiffies + peer->persistent_keepalive_interval * HZ);
}
void wg_timers_init(struct wg_peer *peer)
{
timer_setup(&peer->timer_retransmit_handshake,
wg_expired_retransmit_handshake, 0);
timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0);
timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0);
timer_setup(&peer->timer_zero_key_material,
wg_expired_zero_key_material, 0);
timer_setup(&peer->timer_persistent_keepalive,
wg_expired_send_persistent_keepalive, 0);
INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material);
peer->timer_handshake_attempts = 0;
peer->sent_lastminute_handshake = false;
peer->timer_need_another_keepalive = false;
}
void wg_timers_stop(struct wg_peer *peer)
{
timer_delete_sync(&peer->timer_retransmit_handshake);
timer_delete_sync(&peer->timer_send_keepalive);
timer_delete_sync(&peer->timer_new_handshake);
timer_delete_sync(&peer->timer_zero_key_material);
timer_delete_sync(&peer->timer_persistent_keepalive);
flush_work(&peer->clear_peer_work);
}
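/* Summary of the per-peer timer schedule configured above, assuming the
 * usual protocol constants from messages.h (not shown in this file):
 * timer_retransmit_handshake fires REKEY_TIMEOUT (5 s) plus jitter after
 * an initiation is sent; timer_send_keepalive fires KEEPALIVE_TIMEOUT
 * (10 s) after data is received with nothing queued to send back;
 * timer_new_handshake fires KEEPALIVE_TIMEOUT + REKEY_TIMEOUT (15 s) plus
 * jitter after data is sent with no reply; timer_zero_key_material fires
 * REJECT_AFTER_TIME * 3 (540 s) after a session is derived; and
 * timer_persistent_keepalive fires after the user-configured interval,
 * refreshed on every authenticated packet traversal.
 */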
|
linux-master
|
drivers/net/wireguard/timers.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "queueing.h"
#include "device.h"
#include "peer.h"
#include "timers.h"
#include "messages.h"
#include "cookie.h"
#include "socket.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>
/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
dev_sw_netstats_rx_add(peer->device->dev, len);
peer->rx_bytes += len;
}
#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)
static size_t validate_header_len(struct sk_buff *skb)
{
if (unlikely(skb->len < sizeof(struct message_header)))
return 0;
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
skb->len >= MESSAGE_MINIMUM_LENGTH)
return sizeof(struct message_data);
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
skb->len == sizeof(struct message_handshake_initiation))
return sizeof(struct message_handshake_initiation);
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
skb->len == sizeof(struct message_handshake_response))
return sizeof(struct message_handshake_response);
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
skb->len == sizeof(struct message_handshake_cookie))
return sizeof(struct message_handshake_cookie);
return 0;
}
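/* For reference, with the standard WireGuard wire format (struct sizes
 * defined in messages.h, not shown here) this accepts handshake
 * initiations of exactly 148 bytes, handshake responses of exactly 92
 * bytes, cookie replies of exactly 64 bytes, and data packets of at
 * least 32 bytes (16-byte header plus 16-byte Poly1305 tag).
 */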
static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
{
size_t data_offset, data_len, header_len;
struct udphdr *udp;
if (unlikely(!wg_check_packet_protocol(skb) ||
skb_transport_header(skb) < skb->head ||
(skb_transport_header(skb) + sizeof(struct udphdr)) >
skb_tail_pointer(skb)))
return -EINVAL; /* Bogus IP header */
udp = udp_hdr(skb);
data_offset = (u8 *)udp - skb->data;
if (unlikely(data_offset > U16_MAX ||
data_offset + sizeof(struct udphdr) > skb->len))
/* Packet has its offset at an impossible location or isn't big enough
* to have UDP fields.
*/
return -EINVAL;
data_len = ntohs(udp->len);
if (unlikely(data_len < sizeof(struct udphdr) ||
data_len > skb->len - data_offset))
/* UDP header reports a length that is either too small or larger than
 * the data actually present.
*/
return -EINVAL;
data_len -= sizeof(struct udphdr);
data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
if (unlikely(!pskb_may_pull(skb,
data_offset + sizeof(struct message_header)) ||
pskb_trim(skb, data_len + data_offset) < 0))
return -EINVAL;
skb_pull(skb, data_offset);
if (unlikely(skb->len != data_len))
/* Final len does not agree with calculated len */
return -EINVAL;
header_len = validate_header_len(skb);
if (unlikely(!header_len))
return -EINVAL;
__skb_push(skb, data_offset);
if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
return -EINVAL;
__skb_pull(skb, data_offset);
return 0;
}
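/* Net effect of the above: the skb is trimmed to exactly the UDP payload
 * length claimed by the UDP header, skb->data is left pointing at the
 * start of the WireGuard message, and the message type/length
 * combination has been checked by validate_header_len().
 */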
static void wg_receive_handshake_packet(struct wg_device *wg,
struct sk_buff *skb)
{
enum cookie_mac_state mac_state;
struct wg_peer *peer = NULL;
/* This is global, so that our load calculation applies to the whole
* system. We don't care about races with it at all.
*/
static u64 last_under_load;
bool packet_needs_cookie;
bool under_load;
if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
wg->dev->name, skb);
wg_cookie_message_consume(
(struct message_handshake_cookie *)skb->data, wg);
return;
}
under_load = atomic_read(&wg->handshake_queue_len) >=
MAX_QUEUED_INCOMING_HANDSHAKES / 8;
if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
} else if (last_under_load) {
under_load = !wg_birthdate_has_expired(last_under_load, 1);
if (!under_load)
last_under_load = 0;
}
mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
under_load);
if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
(!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
packet_needs_cookie = false;
} else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
packet_needs_cookie = true;
} else {
net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
wg->dev->name, skb);
return;
}
switch (SKB_TYPE_LE32(skb)) {
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
struct message_handshake_initiation *message =
(struct message_handshake_initiation *)skb->data;
if (packet_needs_cookie) {
wg_packet_send_handshake_cookie(wg, skb,
message->sender_index);
return;
}
peer = wg_noise_handshake_consume_initiation(message, wg);
if (unlikely(!peer)) {
net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
wg->dev->name, skb);
return;
}
wg_socket_set_peer_endpoint_from_skb(peer, skb);
net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
wg->dev->name, peer->internal_id,
&peer->endpoint.addr);
wg_packet_send_handshake_response(peer);
break;
}
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
struct message_handshake_response *message =
(struct message_handshake_response *)skb->data;
if (packet_needs_cookie) {
wg_packet_send_handshake_cookie(wg, skb,
message->sender_index);
return;
}
peer = wg_noise_handshake_consume_response(message, wg);
if (unlikely(!peer)) {
net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
wg->dev->name, skb);
return;
}
wg_socket_set_peer_endpoint_from_skb(peer, skb);
net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
wg->dev->name, peer->internal_id,
&peer->endpoint.addr);
if (wg_noise_handshake_begin_session(&peer->handshake,
&peer->keypairs)) {
wg_timers_session_derived(peer);
wg_timers_handshake_complete(peer);
/* Calling this function will either send any existing
* packets in the queue and not send a keepalive, which
 * is the best case, or, if there's nothing in the
* queue, it will send a keepalive, in order to give
* immediate confirmation of the session.
*/
wg_packet_send_keepalive(peer);
}
break;
}
}
if (unlikely(!peer)) {
WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
return;
}
local_bh_disable();
update_rx_stats(peer, skb->len);
local_bh_enable();
wg_timers_any_authenticated_packet_received(peer);
wg_timers_any_authenticated_packet_traversal(peer);
wg_peer_put(peer);
}
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
struct sk_buff *skb;
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
atomic_dec(&wg->handshake_queue_len);
cond_resched();
}
}
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
bool send;
if (peer->sent_lastminute_handshake)
return;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
send = keypair && READ_ONCE(keypair->sending.is_valid) &&
keypair->i_am_the_initiator &&
wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
rcu_read_unlock_bh();
if (unlikely(send)) {
peer->sent_lastminute_handshake = true;
wg_packet_send_queued_handshake_initiation(peer, false);
}
}
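/* With the usual protocol constants (REJECT_AFTER_TIME = 180 s,
 * KEEPALIVE_TIMEOUT = 10 s, REKEY_TIMEOUT = 5 s, defined in messages.h),
 * the initiator starts this last-minute rekey once its sending key is
 * older than 165 seconds, leaving a 15 second margin before the key
 * would be rejected outright.
 */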
static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
struct scatterlist sg[MAX_SKB_FRAGS + 8];
struct sk_buff *trailer;
unsigned int offset;
int num_frags;
if (unlikely(!keypair))
return false;
if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
PACKET_CB(skb)->nonce =
le64_to_cpu(((struct message_data *)skb->data)->counter);
/* We ensure that the network header is part of the packet before we
* call skb_cow_data, so that there's no chance that data is removed
 * from the skb, which lets us extract the original endpoint later.
*/
offset = skb->data - skb_network_header(skb);
skb_push(skb, offset);
num_frags = skb_cow_data(skb, 0, &trailer);
offset += sizeof(struct message_data);
skb_pull(skb, offset);
if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
return false;
sg_init_table(sg, num_frags);
if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
return false;
if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
PACKET_CB(skb)->nonce,
keypair->receiving.key))
return false;
/* Another ugly situation of pushing and pulling the header so as to
* keep endpoint information intact.
*/
skb_push(skb, offset);
if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
return false;
skb_pull(skb, offset);
return true;
}
/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
unsigned long index, index_current, top, i;
bool ret = false;
spin_lock_bh(&counter->lock);
if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
their_counter >= REJECT_AFTER_MESSAGES))
goto out;
++their_counter;
if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
counter->counter))
goto out;
index = their_counter >> ilog2(BITS_PER_LONG);
if (likely(their_counter > counter->counter)) {
index_current = counter->counter >> ilog2(BITS_PER_LONG);
top = min_t(unsigned long, index - index_current,
COUNTER_BITS_TOTAL / BITS_PER_LONG);
for (i = 1; i <= top; ++i)
counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
counter->counter = their_counter;
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
&counter->backtrack[index]);
out:
spin_unlock_bh(&counter->lock);
return ret;
}
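/* Worked example, assuming 64-bit longs and the usual window constants
 * from messages.h (COUNTER_BITS_TOTAL = 8192, so backtrack[] holds 128
 * words and the effective window is 8192 - 64 = 8128 messages): receiving
 * counter 299 makes their_counter 300, so index = 300 >> 6 = 4 and the
 * bit tested and set is 300 & 63 = 44 in backtrack[4]. Counters more
 * than ~8128 behind the highest value seen so far are rejected outright
 * by the (COUNTER_WINDOW_SIZE + their_counter) < counter->counter check.
 */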
#include "selftest/counter.c"
static void wg_packet_consume_data_done(struct wg_peer *peer,
struct sk_buff *skb,
struct endpoint *endpoint)
{
struct net_device *dev = peer->device->dev;
unsigned int len, len_before_trim;
struct wg_peer *routed_peer;
wg_socket_set_peer_endpoint(peer, endpoint);
if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
PACKET_CB(skb)->keypair))) {
wg_timers_handshake_complete(peer);
wg_packet_send_staged_packets(peer);
}
keep_key_fresh(peer);
wg_timers_any_authenticated_packet_received(peer);
wg_timers_any_authenticated_packet_traversal(peer);
/* A packet with length 0 is a keepalive packet */
if (unlikely(!skb->len)) {
update_rx_stats(peer, message_data_len(0));
net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id,
&peer->endpoint.addr);
goto packet_processed;
}
wg_timers_data_received(peer);
if (unlikely(skb_network_header(skb) < skb->head))
goto dishonest_packet_size;
if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
(ip_hdr(skb)->version == 4 ||
(ip_hdr(skb)->version == 6 &&
pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
goto dishonest_packet_type;
skb->dev = dev;
/* We've already verified the Poly1305 auth tag, which means this packet
* was not modified in transit. We can therefore tell the networking
* stack that all checksums of every layer of encapsulation have already
 * been checked "by the hardware", and that it is therefore unnecessary
 * to check them again in software.
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ~0; /* All levels */
skb->protocol = ip_tunnel_parse_protocol(skb);
if (skb->protocol == htons(ETH_P_IP)) {
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
goto dishonest_packet_size;
INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
len = ntohs(ipv6_hdr(skb)->payload_len) +
sizeof(struct ipv6hdr);
INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
} else {
goto dishonest_packet_type;
}
if (unlikely(len > skb->len))
goto dishonest_packet_size;
len_before_trim = skb->len;
if (unlikely(pskb_trim(skb, len)))
goto packet_processed;
routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
skb);
wg_peer_put(routed_peer); /* We don't need the extra reference. */
if (unlikely(routed_peer != peer))
goto dishonest_packet_peer;
napi_gro_receive(&peer->napi, skb);
update_rx_stats(peer, message_data_len(len_before_trim));
return;
dishonest_packet_peer:
net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
dev->name, skb, peer->internal_id,
&peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_frame_errors;
goto packet_processed;
dishonest_packet_type:
net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_frame_errors;
goto packet_processed;
dishonest_packet_size:
net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
dev->name, peer->internal_id, &peer->endpoint.addr);
++dev->stats.rx_errors;
++dev->stats.rx_length_errors;
goto packet_processed;
packet_processed:
dev_kfree_skb(skb);
}
int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
struct noise_keypair *keypair;
struct endpoint endpoint;
enum packet_state state;
struct sk_buff *skb;
int work_done = 0;
bool free;
if (unlikely(budget <= 0))
return 0;
while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
(state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
PACKET_STATE_UNCRYPTED) {
wg_prev_queue_drop_peeked(&peer->rx_queue);
keypair = PACKET_CB(skb)->keypair;
free = true;
if (unlikely(state != PACKET_STATE_CRYPTED))
goto next;
if (unlikely(!counter_validate(&keypair->receiving_counter,
PACKET_CB(skb)->nonce))) {
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
keypair->receiving_counter.counter);
goto next;
}
if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
goto next;
wg_reset_packet(skb, false);
wg_packet_consume_data_done(peer, skb, &endpoint);
free = false;
next:
wg_noise_keypair_put(keypair, false);
wg_peer_put(peer);
if (unlikely(free))
dev_kfree_skb(skb);
if (++work_done >= budget)
break;
}
if (work_done < budget)
napi_complete_done(napi, work_done);
return work_done;
}
void wg_packet_decrypt_worker(struct work_struct *work)
{
struct crypt_queue *queue = container_of(work, struct multicore_worker,
work)->ptr;
struct sk_buff *skb;
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
enum packet_state state =
likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
wg_queue_enqueue_per_peer_rx(skb, state);
if (need_resched())
cond_resched();
}
}
static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
{
__le32 idx = ((struct message_data *)skb->data)->key_idx;
struct wg_peer *peer = NULL;
int ret;
rcu_read_lock_bh();
PACKET_CB(skb)->keypair =
(struct noise_keypair *)wg_index_hashtable_lookup(
wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
&peer);
if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
goto err_keypair;
if (unlikely(READ_ONCE(peer->is_dead)))
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
rcu_read_unlock_bh();
return;
}
err:
wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
err_keypair:
rcu_read_unlock_bh();
wg_peer_put(peer);
dev_kfree_skb(skb);
}
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
{
if (unlikely(prepare_skb_header(skb, wg) < 0))
goto err;
switch (SKB_TYPE_LE32(skb)) {
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
int cpu, ret = -EBUSY;
if (unlikely(!rng_is_initialized()))
goto drop;
if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
}
} else
ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
if (ret) {
drop:
net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
wg->dev->name, skb);
goto err;
}
atomic_inc(&wg->handshake_queue_len);
cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
/* Queues up a call to wg_packet_handshake_receive_worker(): */
queue_work_on(cpu, wg->handshake_receive_wq,
&per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
wg_packet_consume_data(wg, skb);
break;
default:
WARN(1, "Non-exhaustive parsing of packet header led to an unknown packet type!\n");
goto err;
}
return;
err:
dev_kfree_skb(skb);
}
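/* Backpressure note for the handshake path above: once the handshake
 * ring is more than half of MAX_QUEUED_INCOMING_HANDSHAKES full,
 * producers only trylock the ring's producer lock, so handshake floods
 * are shed cheaply instead of contending on the lock. Data packets
 * bypass this queue entirely and go straight to the decrypt queue.
 */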
|
linux-master
|
drivers/net/wireguard/receive.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/dst_metadata.h>
#include <net/gso.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>
static LIST_HEAD(device_list);
static int wg_open(struct net_device *dev)
{
struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
struct inet6_dev *dev_v6 = __in6_dev_get(dev);
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
int ret;
if (dev_v4) {
/* At some point we might put this check near the ip_rt_send_
* redirect call of ip_forward in net/ipv4/ip_forward.c, similar
* to the current secpath check.
*/
IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
}
if (dev_v6)
dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
mutex_lock(&wg->device_update_lock);
ret = wg_socket_init(wg, wg->incoming_port);
if (ret < 0)
goto out;
list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_packet_send_staged_packets(peer);
if (peer->persistent_keepalive_interval)
wg_packet_send_keepalive(peer);
}
out:
mutex_unlock(&wg->device_update_lock);
return ret;
}
static int wg_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
struct wg_device *wg;
struct wg_peer *peer;
/* If the machine is constantly suspending and resuming, as part of
* its normal operation rather than as a somewhat rare event, then we
* don't actually want to clear keys.
*/
if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) ||
IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP))
return 0;
if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
return 0;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
del_timer(&peer->timer_zero_key_material);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
}
mutex_unlock(&wg->device_update_lock);
}
rtnl_unlock();
rcu_barrier();
return 0;
}
static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
static int wg_vm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
struct wg_device *wg;
struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_noise_expire_current_peer_keypairs(peer);
mutex_unlock(&wg->device_update_lock);
}
rtnl_unlock();
return 0;
}
static struct notifier_block vm_notifier = { .notifier_call = wg_vm_notification };
static int wg_stop(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
struct sk_buff *skb;
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
wg_packet_purge_staged_packets(peer);
wg_timers_stop(peer);
wg_noise_handshake_clear(&peer->handshake);
wg_noise_keypairs_clear(&peer->keypairs);
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
}
mutex_unlock(&wg->device_update_lock);
while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
kfree_skb(skb);
atomic_set(&wg->handshake_queue_len, 0);
wg_socket_reinit(wg, NULL, NULL);
return 0;
}
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct sk_buff_head packets;
struct wg_peer *peer;
struct sk_buff *next;
sa_family_t family;
u32 mtu;
int ret;
if (unlikely(!wg_check_packet_protocol(skb))) {
ret = -EPROTONOSUPPORT;
net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
goto err;
}
peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
if (unlikely(!peer)) {
ret = -ENOKEY;
if (skb->protocol == htons(ETH_P_IP))
net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
dev->name, &ip_hdr(skb)->daddr);
else if (skb->protocol == htons(ETH_P_IPV6))
net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
dev->name, &ipv6_hdr(skb)->daddr);
goto err_icmp;
}
family = READ_ONCE(peer->endpoint.addr.sa_family);
if (unlikely(family != AF_INET && family != AF_INET6)) {
ret = -EDESTADDRREQ;
net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
dev->name, peer->internal_id);
goto err_peer;
}
mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
__skb_queue_head_init(&packets);
if (!skb_is_gso(skb)) {
skb_mark_not_on_list(skb);
} else {
struct sk_buff *segs = skb_gso_segment(skb, 0);
if (IS_ERR(segs)) {
ret = PTR_ERR(segs);
goto err_peer;
}
dev_kfree_skb(skb);
skb = segs;
}
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
continue;
/* We only need to keep the original dst around for icmp,
* so at this point we're in a position to drop it.
*/
skb_dst_drop(skb);
PACKET_CB(skb)->mtu = mtu;
__skb_queue_tail(&packets, skb);
}
spin_lock_bh(&peer->staged_packet_queue.lock);
/* If the queue is getting too big, we start removing the oldest packets
 * until it's small again. We do this before adding the new packets, so
 * that the GSO segments we just built are not the ones being dropped.
*/
while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
++dev->stats.tx_dropped;
}
skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
spin_unlock_bh(&peer->staged_packet_queue.lock);
wg_packet_send_staged_packets(peer);
wg_peer_put(peer);
return NETDEV_TX_OK;
err_peer:
wg_peer_put(peer);
err_icmp:
if (skb->protocol == htons(ETH_P_IP))
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
++dev->stats.tx_errors;
kfree_skb(skb);
return ret;
}
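/* Flow summary for the transmit path above: GSO skbs are software
 * segmented first, every resulting segment records the same computed MTU
 * in its control block, and the segments are parked on the peer's staged
 * queue (capped at MAX_STAGED_PACKETS, oldest dropped first) until
 * wg_packet_send_staged_packets() hands them to the encryption path.
 */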
static const struct net_device_ops netdev_ops = {
.ndo_open = wg_open,
.ndo_stop = wg_stop,
.ndo_start_xmit = wg_xmit,
.ndo_get_stats64 = dev_get_tstats64
};
static void wg_destruct(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
rtnl_lock();
list_del(&wg->device_list);
rtnl_unlock();
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg->incoming_port = 0;
wg_socket_reinit(wg, NULL, NULL);
/* The final references are cleared in the below calls to destroy_workqueue. */
wg_peer_remove_all(wg);
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
wg_packet_queue_free(&wg->handshake_queue, true);
wg_packet_queue_free(&wg->decrypt_queue, false);
wg_packet_queue_free(&wg->encrypt_queue, false);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
free_percpu(dev->tstats);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
pr_debug("%s: Interface destroyed\n", dev->name);
free_netdev(dev);
}
static const struct device_type device_type = { .name = KBUILD_MODNAME };
static void wg_setup(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
dev->netdev_ops = &netdev_ops;
dev->header_ops = &ip_tunnel_header_ops;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->priv_flags |= IFF_NO_QUEUE;
dev->features |= NETIF_F_LLTX;
dev->features |= WG_NETDEV_FEATURES;
dev->hw_features |= WG_NETDEV_FEATURES;
dev->hw_enc_features |= WG_NETDEV_FEATURES;
dev->mtu = ETH_DATA_LEN - overhead;
dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
SET_NETDEV_DEVTYPE(dev, &device_type);
/* We need to keep the dst around in case of icmp replies. */
netif_keep_dst(dev);
memset(wg, 0, sizeof(*wg));
wg->dev = dev;
}
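/* Worked example of the MTU math above, assuming the usual constants
 * from messages.h: MESSAGE_MINIMUM_LENGTH is 32 (16-byte data header
 * plus 16-byte Poly1305 tag), so overhead = 32 + 8 (UDP) + 40 (IPv6)
 * = 80 bytes and the default MTU becomes ETH_DATA_LEN (1500) - 80 =
 * 1420, the familiar wg interface default.
 */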
static int wg_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct wg_device *wg = netdev_priv(dev);
int ret = -ENOMEM;
rcu_assign_pointer(wg->creating_net, src_net);
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
wg_allowedips_init(&wg->peer_allowedips);
wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
wg->device_update_gen = 1;
wg->peer_hashtable = wg_pubkey_hashtable_alloc();
if (!wg->peer_hashtable)
return ret;
wg->index_hashtable = wg_index_hashtable_alloc();
if (!wg->index_hashtable)
goto err_free_peer_hashtable;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
goto err_free_index_hashtable;
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
goto err_free_tstats;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_send_wq)
goto err_destroy_handshake_receive;
wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
if (!wg->packet_crypt_wq)
goto err_destroy_handshake_send;
ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_destroy_packet_crypt;
ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
MAX_QUEUED_PACKETS);
if (ret < 0)
goto err_free_encrypt_queue;
ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
MAX_QUEUED_INCOMING_HANDSHAKES);
if (ret < 0)
goto err_free_decrypt_queue;
ret = wg_ratelimiter_init();
if (ret < 0)
goto err_free_handshake_queue;
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
list_add(&wg->device_list, &device_list);
/* We wait until the end to assign priv_destructor, so that
* register_netdevice doesn't call it for us if it fails.
*/
dev->priv_destructor = wg_destruct;
pr_debug("%s: Interface created\n", dev->name);
return ret;
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
err_free_handshake_queue:
wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
err_free_tstats:
free_percpu(dev->tstats);
err_free_index_hashtable:
kvfree(wg->index_hashtable);
err_free_peer_hashtable:
kvfree(wg->peer_hashtable);
return ret;
}
static struct rtnl_link_ops link_ops __read_mostly = {
.kind = KBUILD_MODNAME,
.priv_size = sizeof(struct wg_device),
.setup = wg_setup,
.newlink = wg_newlink,
};
static void wg_netns_pre_exit(struct net *net)
{
struct wg_device *wg;
struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
if (rcu_access_pointer(wg->creating_net) == net) {
pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
netif_carrier_off(wg->dev);
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg_socket_reinit(wg, NULL, NULL);
list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
mutex_unlock(&wg->device_update_lock);
}
}
rtnl_unlock();
}
static struct pernet_operations pernet_ops = {
.pre_exit = wg_netns_pre_exit
};
int __init wg_device_init(void)
{
int ret;
ret = register_pm_notifier(&pm_notifier);
if (ret)
return ret;
ret = register_random_vmfork_notifier(&vm_notifier);
if (ret)
goto error_pm;
ret = register_pernet_device(&pernet_ops);
if (ret)
goto error_vm;
ret = rtnl_link_register(&link_ops);
if (ret)
goto error_pernet;
return 0;
error_pernet:
unregister_pernet_device(&pernet_ops);
error_vm:
unregister_random_vmfork_notifier(&vm_notifier);
error_pm:
unregister_pm_notifier(&pm_notifier);
return ret;
}
void wg_device_uninit(void)
{
rtnl_link_unregister(&link_ops);
unregister_pernet_device(&pernet_ops);
unregister_random_vmfork_notifier(&vm_notifier);
unregister_pm_notifier(&pm_notifier);
rcu_barrier();
}
|
linux-master
|
drivers/net/wireguard/device.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "ratelimiter.h"
#include <linux/siphash.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
static struct kmem_cache *entry_cache;
static hsiphash_key_t key;
static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
static DEFINE_MUTEX(init_lock);
static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
static atomic_t total_entries = ATOMIC_INIT(0);
static unsigned int max_entries, table_size;
static void wg_ratelimiter_gc_entries(struct work_struct *);
static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
static struct hlist_head *table_v4;
#if IS_ENABLED(CONFIG_IPV6)
static struct hlist_head *table_v6;
#endif
struct ratelimiter_entry {
u64 last_time_ns, tokens, ip;
void *net;
spinlock_t lock;
struct hlist_node hash;
struct rcu_head rcu;
};
enum {
PACKETS_PER_SECOND = 20,
PACKETS_BURSTABLE = 5,
PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
};
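/* With the values above, PACKET_COST = NSEC_PER_SEC / 20 = 50,000,000 ns
 * per packet and TOKEN_MAX = 250,000,000 ns, i.e. a source may burst 5
 * packets at once and is then held to a steady 20 packets per second,
 * since tokens accrue at one nanosecond of credit per elapsed nanosecond
 * in wg_ratelimiter_allow() below.
 */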
static void entry_free(struct rcu_head *rcu)
{
kmem_cache_free(entry_cache,
container_of(rcu, struct ratelimiter_entry, rcu));
atomic_dec(&total_entries);
}
static void entry_uninit(struct ratelimiter_entry *entry)
{
hlist_del_rcu(&entry->hash);
call_rcu(&entry->rcu, entry_free);
}
/* Calling this function with a NULL work uninits all entries. */
static void wg_ratelimiter_gc_entries(struct work_struct *work)
{
const u64 now = ktime_get_coarse_boottime_ns();
struct ratelimiter_entry *entry;
struct hlist_node *temp;
unsigned int i;
for (i = 0; i < table_size; ++i) {
spin_lock(&table_lock);
hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
if (unlikely(!work) ||
now - entry->last_time_ns > NSEC_PER_SEC)
entry_uninit(entry);
}
#if IS_ENABLED(CONFIG_IPV6)
hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
if (unlikely(!work) ||
now - entry->last_time_ns > NSEC_PER_SEC)
entry_uninit(entry);
}
#endif
spin_unlock(&table_lock);
if (likely(work))
cond_resched();
}
if (likely(work))
queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
}
bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
{
/* We only take the bottom half of the net pointer, so that we can hash
* 3 words in the end. This way, siphash's len param fits into the final
* u32, and we don't incur an extra round.
*/
const u32 net_word = (unsigned long)net;
struct ratelimiter_entry *entry;
struct hlist_head *bucket;
u64 ip;
if (skb->protocol == htons(ETH_P_IP)) {
ip = (u64 __force)ip_hdr(skb)->saddr;
bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
(table_size - 1)];
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
/* Only use 64 bits, so as to ratelimit the whole /64. */
memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
(table_size - 1)];
}
#endif
else
return false;
rcu_read_lock();
hlist_for_each_entry_rcu(entry, bucket, hash) {
if (entry->net == net && entry->ip == ip) {
u64 now, tokens;
bool ret;
/* Quasi-inspired by nft_limit.c, but this is actually a
* slightly different algorithm. Namely, we incorporate
* the burst as part of the maximum tokens, rather than
* as part of the rate.
*/
spin_lock(&entry->lock);
now = ktime_get_coarse_boottime_ns();
tokens = min_t(u64, TOKEN_MAX,
entry->tokens + now -
entry->last_time_ns);
entry->last_time_ns = now;
ret = tokens >= PACKET_COST;
entry->tokens = ret ? tokens - PACKET_COST : tokens;
spin_unlock(&entry->lock);
rcu_read_unlock();
return ret;
}
}
rcu_read_unlock();
if (atomic_inc_return(&total_entries) > max_entries)
goto err_oom;
entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
if (unlikely(!entry))
goto err_oom;
entry->net = net;
entry->ip = ip;
INIT_HLIST_NODE(&entry->hash);
spin_lock_init(&entry->lock);
entry->last_time_ns = ktime_get_coarse_boottime_ns();
entry->tokens = TOKEN_MAX - PACKET_COST;
spin_lock(&table_lock);
hlist_add_head_rcu(&entry->hash, bucket);
spin_unlock(&table_lock);
return true;
err_oom:
atomic_dec(&total_entries);
return false;
}
int wg_ratelimiter_init(void)
{
mutex_lock(&init_lock);
if (++init_refcnt != 1)
goto out;
entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
if (!entry_cache)
goto err;
/* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
 * but what it has in common with us is that it also uses a massive
 * hashtable. So, we borrow its wisdom about good table sizes on systems
 * with different amounts of RAM. This calculation here comes from there.
*/
table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
max_t(unsigned long, 16, roundup_pow_of_two(
(totalram_pages() << PAGE_SHIFT) /
(1U << 14) / sizeof(struct hlist_head)));
max_entries = table_size * 8;
table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
if (unlikely(!table_v4))
goto err_kmemcache;
#if IS_ENABLED(CONFIG_IPV6)
table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
if (unlikely(!table_v6)) {
kvfree(table_v4);
goto err_kmemcache;
}
#endif
queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
get_random_bytes(&key, sizeof(key));
out:
mutex_unlock(&init_lock);
return 0;
err_kmemcache:
kmem_cache_destroy(entry_cache);
err:
--init_refcnt;
mutex_unlock(&init_lock);
return -ENOMEM;
}
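/* Example of the sizing above: on a machine with more than 1 GiB of RAM,
 * table_size is 8192 buckets and max_entries is therefore 65536; on
 * smaller machines the table is roundup_pow_of_two(RAM / 16 KiB /
 * sizeof(struct hlist_head)) buckets, but never fewer than 16.
 */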
void wg_ratelimiter_uninit(void)
{
mutex_lock(&init_lock);
if (!init_refcnt || --init_refcnt)
goto out;
cancel_delayed_work_sync(&gc_work);
wg_ratelimiter_gc_entries(NULL);
rcu_barrier();
kvfree(table_v4);
#if IS_ENABLED(CONFIG_IPV6)
kvfree(table_v6);
#endif
kmem_cache_destroy(entry_cache);
out:
mutex_unlock(&init_lock);
}
#include "selftest/ratelimiter.c"
|
linux-master
|
drivers/net/wireguard/ratelimiter.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "netlink.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "queueing.h"
#include "messages.h"
#include <uapi/linux/wireguard.h>
#include <linux/if.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <crypto/algapi.h>
static struct genl_family genl_family;
static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
[WGDEVICE_A_PEERS] = { .type = NLA_NESTED }
};
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
[WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
[WGPEER_A_FLAGS] = { .type = NLA_U32 },
[WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
[WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
[WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 }
};
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
[WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
};
static struct wg_device *lookup_interface(struct nlattr **attrs,
struct sk_buff *skb)
{
struct net_device *dev = NULL;
if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME])
return ERR_PTR(-EBADR);
if (attrs[WGDEVICE_A_IFINDEX])
dev = dev_get_by_index(sock_net(skb->sk),
nla_get_u32(attrs[WGDEVICE_A_IFINDEX]));
else if (attrs[WGDEVICE_A_IFNAME])
dev = dev_get_by_name(sock_net(skb->sk),
nla_data(attrs[WGDEVICE_A_IFNAME]));
if (!dev)
return ERR_PTR(-ENODEV);
if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind ||
strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) {
dev_put(dev);
return ERR_PTR(-EOPNOTSUPP);
}
return netdev_priv(dev);
}
static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr,
int family)
{
struct nlattr *allowedip_nest;
allowedip_nest = nla_nest_start(skb, 0);
if (!allowedip_nest)
return -EMSGSIZE;
if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) ||
nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) ||
nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ?
sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) {
nla_nest_cancel(skb, allowedip_nest);
return -EMSGSIZE;
}
nla_nest_end(skb, allowedip_nest);
return 0;
}
struct dump_ctx {
struct wg_device *wg;
struct wg_peer *next_peer;
u64 allowedips_seq;
struct allowedips_node *next_allowedip;
};
#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args)
static int
get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
{
struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0);
struct allowedips_node *allowedips_node = ctx->next_allowedip;
bool fail;
if (!peer_nest)
return -EMSGSIZE;
down_read(&peer->handshake.lock);
fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN,
peer->handshake.remote_static);
up_read(&peer->handshake.lock);
if (fail)
goto err;
if (!allowedips_node) {
const struct __kernel_timespec last_handshake = {
.tv_sec = peer->walltime_last_handshake.tv_sec,
.tv_nsec = peer->walltime_last_handshake.tv_nsec
};
down_read(&peer->handshake.lock);
fail = nla_put(skb, WGPEER_A_PRESHARED_KEY,
NOISE_SYMMETRIC_KEY_LEN,
peer->handshake.preshared_key);
up_read(&peer->handshake.lock);
if (fail)
goto err;
if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME,
sizeof(last_handshake), &last_handshake) ||
nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
peer->persistent_keepalive_interval) ||
nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes,
WGPEER_A_UNSPEC) ||
nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes,
WGPEER_A_UNSPEC) ||
nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1))
goto err;
read_lock_bh(&peer->endpoint_lock);
if (peer->endpoint.addr.sa_family == AF_INET)
fail = nla_put(skb, WGPEER_A_ENDPOINT,
sizeof(peer->endpoint.addr4),
&peer->endpoint.addr4);
else if (peer->endpoint.addr.sa_family == AF_INET6)
fail = nla_put(skb, WGPEER_A_ENDPOINT,
sizeof(peer->endpoint.addr6),
&peer->endpoint.addr6);
read_unlock_bh(&peer->endpoint_lock);
if (fail)
goto err;
allowedips_node =
list_first_entry_or_null(&peer->allowedips_list,
struct allowedips_node, peer_list);
}
if (!allowedips_node)
goto no_allowedips;
if (!ctx->allowedips_seq)
ctx->allowedips_seq = peer->device->peer_allowedips.seq;
else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
goto no_allowedips;
allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
if (!allowedips_nest)
goto err;
list_for_each_entry_from(allowedips_node, &peer->allowedips_list,
peer_list) {
u8 cidr, ip[16] __aligned(__alignof(u64));
int family;
family = wg_allowedips_read_node(allowedips_node, ip, &cidr);
if (get_allowedips(skb, ip, cidr, family)) {
nla_nest_end(skb, allowedips_nest);
nla_nest_end(skb, peer_nest);
ctx->next_allowedip = allowedips_node;
return -EMSGSIZE;
}
}
nla_nest_end(skb, allowedips_nest);
no_allowedips:
nla_nest_end(skb, peer_nest);
ctx->next_allowedip = NULL;
ctx->allowedips_seq = 0;
return 0;
err:
nla_nest_cancel(skb, peer_nest);
return -EMSGSIZE;
}
static int wg_get_device_start(struct netlink_callback *cb)
{
struct wg_device *wg;
wg = lookup_interface(genl_info_dump(cb)->attrs, cb->skb);
if (IS_ERR(wg))
return PTR_ERR(wg);
DUMP_CTX(cb)->wg = wg;
return 0;
}
static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct wg_peer *peer, *next_peer_cursor;
struct dump_ctx *ctx = DUMP_CTX(cb);
struct wg_device *wg = ctx->wg;
struct nlattr *peers_nest;
int ret = -EMSGSIZE;
bool done = true;
void *hdr;
rtnl_lock();
mutex_lock(&wg->device_update_lock);
cb->seq = wg->device_update_gen;
next_peer_cursor = ctx->next_peer;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE);
if (!hdr)
goto out;
genl_dump_check_consistent(cb, hdr);
if (!ctx->next_peer) {
if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT,
wg->incoming_port) ||
nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) ||
nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) ||
nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name))
goto out;
down_read(&wg->static_identity.lock);
if (wg->static_identity.has_identity) {
if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY,
NOISE_PUBLIC_KEY_LEN,
wg->static_identity.static_private) ||
nla_put(skb, WGDEVICE_A_PUBLIC_KEY,
NOISE_PUBLIC_KEY_LEN,
wg->static_identity.static_public)) {
up_read(&wg->static_identity.lock);
goto out;
}
}
up_read(&wg->static_identity.lock);
}
peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS);
if (!peers_nest)
goto out;
ret = 0;
/* If the last cursor was removed via list_del_init in peer_remove, then
* we just treat this the same as there being no more peers left. The
* reason is that seq_nr should indicate to userspace that this isn't a
* coherent dump anyway, so they'll try again.
*/
if (list_empty(&wg->peer_list) ||
(ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
nla_nest_cancel(skb, peers_nest);
goto out;
}
lockdep_assert_held(&wg->device_update_lock);
peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
if (get_peer(peer, skb, ctx)) {
done = false;
break;
}
next_peer_cursor = peer;
}
nla_nest_end(skb, peers_nest);
out:
if (!ret && !done && next_peer_cursor)
wg_peer_get(next_peer_cursor);
wg_peer_put(ctx->next_peer);
mutex_unlock(&wg->device_update_lock);
rtnl_unlock();
if (ret) {
genlmsg_cancel(skb, hdr);
return ret;
}
genlmsg_end(skb, hdr);
if (done) {
ctx->next_peer = NULL;
return 0;
}
ctx->next_peer = next_peer_cursor;
return skb->len;
/* At this point, we can't really deal with safely zeroing out the
 * private key material after usage ourselves. This will need an additional API
* in the kernel for marking skbs as zero_on_free.
*/
}
static int wg_get_device_done(struct netlink_callback *cb)
{
struct dump_ctx *ctx = DUMP_CTX(cb);
if (ctx->wg)
dev_put(ctx->wg->dev);
wg_peer_put(ctx->next_peer);
return 0;
}
static int set_port(struct wg_device *wg, u16 port)
{
struct wg_peer *peer;
if (wg->incoming_port == port)
return 0;
list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
if (!netif_running(wg->dev)) {
wg->incoming_port = port;
return 0;
}
return wg_socket_init(wg, port);
}
static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs)
{
int ret = -EINVAL;
u16 family;
u8 cidr;
if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] ||
!attrs[WGALLOWEDIP_A_CIDR_MASK])
return ret;
family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]);
cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]);
if (family == AF_INET && cidr <= 32 &&
nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr))
ret = wg_allowedips_insert_v4(
&peer->device->peer_allowedips,
nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
&peer->device->device_update_lock);
else if (family == AF_INET6 && cidr <= 128 &&
nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr))
ret = wg_allowedips_insert_v6(
&peer->device->peer_allowedips,
nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer,
&peer->device->device_update_lock);
return ret;
}
static int set_peer(struct wg_device *wg, struct nlattr **attrs)
{
u8 *public_key = NULL, *preshared_key = NULL;
struct wg_peer *peer = NULL;
u32 flags = 0;
int ret;
ret = -EINVAL;
if (attrs[WGPEER_A_PUBLIC_KEY] &&
nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN)
public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]);
else
goto out;
if (attrs[WGPEER_A_PRESHARED_KEY] &&
nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN)
preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]);
if (attrs[WGPEER_A_FLAGS])
flags = nla_get_u32(attrs[WGPEER_A_FLAGS]);
ret = -EOPNOTSUPP;
if (flags & ~__WGPEER_F_ALL)
goto out;
ret = -EPFNOSUPPORT;
if (attrs[WGPEER_A_PROTOCOL_VERSION]) {
if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1)
goto out;
}
peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
nla_data(attrs[WGPEER_A_PUBLIC_KEY]));
ret = 0;
if (!peer) { /* Peer doesn't exist yet. Add a new one. */
if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY))
goto out;
/* The peer is new, so there are no allowed IPs to remove yet. */
flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS;
down_read(&wg->static_identity.lock);
if (wg->static_identity.has_identity &&
!memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]),
wg->static_identity.static_public,
NOISE_PUBLIC_KEY_LEN)) {
/* We silently ignore peers that have the same public
* key as the device. The reason we do it silently is
* that we'd like for people to be able to reuse the
* same set of API calls across peers.
*/
up_read(&wg->static_identity.lock);
ret = 0;
goto out;
}
up_read(&wg->static_identity.lock);
peer = wg_peer_create(wg, public_key, preshared_key);
if (IS_ERR(peer)) {
ret = PTR_ERR(peer);
peer = NULL;
goto out;
}
/* Take an additional reference, as though we've just been
* looked up.
*/
wg_peer_get(peer);
}
if (flags & WGPEER_F_REMOVE_ME) {
wg_peer_remove(peer);
goto out;
}
if (preshared_key) {
down_write(&peer->handshake.lock);
memcpy(&peer->handshake.preshared_key, preshared_key,
NOISE_SYMMETRIC_KEY_LEN);
up_write(&peer->handshake.lock);
}
if (attrs[WGPEER_A_ENDPOINT]) {
struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]);
size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]);
struct endpoint endpoint = { { { 0 } } };
if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) {
endpoint.addr4 = *(struct sockaddr_in *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
} else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) {
endpoint.addr6 = *(struct sockaddr_in6 *)addr;
wg_socket_set_peer_endpoint(peer, &endpoint);
}
}
if (flags & WGPEER_F_REPLACE_ALLOWEDIPS)
wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer,
&wg->device_update_lock);
if (attrs[WGPEER_A_ALLOWEDIPS]) {
struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1];
int rem;
nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) {
ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX,
attr, allowedip_policy, NULL);
if (ret < 0)
goto out;
ret = set_allowedip(peer, allowedip);
if (ret < 0)
goto out;
}
}
if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) {
const u16 persistent_keepalive_interval = nla_get_u16(
attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]);
const bool send_keepalive =
!peer->persistent_keepalive_interval &&
persistent_keepalive_interval &&
netif_running(wg->dev);
peer->persistent_keepalive_interval = persistent_keepalive_interval;
if (send_keepalive)
wg_packet_send_keepalive(peer);
}
if (netif_running(wg->dev))
wg_packet_send_staged_packets(peer);
out:
wg_peer_put(peer);
if (attrs[WGPEER_A_PRESHARED_KEY])
memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]),
nla_len(attrs[WGPEER_A_PRESHARED_KEY]));
return ret;
}
static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
{
struct wg_device *wg = lookup_interface(info->attrs, skb);
u32 flags = 0;
int ret;
if (IS_ERR(wg)) {
ret = PTR_ERR(wg);
goto out_nodev;
}
rtnl_lock();
mutex_lock(&wg->device_update_lock);
if (info->attrs[WGDEVICE_A_FLAGS])
flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]);
ret = -EOPNOTSUPP;
if (flags & ~__WGDEVICE_F_ALL)
goto out;
if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
struct net *net;
rcu_read_lock();
net = rcu_dereference(wg->creating_net);
ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
rcu_read_unlock();
if (ret)
goto out;
}
++wg->device_update_gen;
if (info->attrs[WGDEVICE_A_FWMARK]) {
struct wg_peer *peer;
wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]);
list_for_each_entry(peer, &wg->peer_list, peer_list)
wg_socket_clear_peer_endpoint_src(peer);
}
if (info->attrs[WGDEVICE_A_LISTEN_PORT]) {
ret = set_port(wg,
nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT]));
if (ret)
goto out;
}
if (flags & WGDEVICE_F_REPLACE_PEERS)
wg_peer_remove_all(wg);
if (info->attrs[WGDEVICE_A_PRIVATE_KEY] &&
nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) ==
NOISE_PUBLIC_KEY_LEN) {
u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]);
u8 public_key[NOISE_PUBLIC_KEY_LEN];
struct wg_peer *peer, *temp;
bool send_staged_packets;
if (!crypto_memneq(wg->static_identity.static_private,
private_key, NOISE_PUBLIC_KEY_LEN))
goto skip_set_private_key;
/* We remove before setting, to prevent a race, which means doing
* two 25519-genpub ops.
*/
if (curve25519_generate_public(public_key, private_key)) {
peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable,
public_key);
if (peer) {
wg_peer_put(peer);
wg_peer_remove(peer);
}
}
down_write(&wg->static_identity.lock);
send_staged_packets = !wg->static_identity.has_identity && netif_running(wg->dev);
wg_noise_set_static_identity_private_key(&wg->static_identity, private_key);
send_staged_packets = send_staged_packets && wg->static_identity.has_identity;
wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) {
wg_noise_precompute_static_static(peer);
wg_noise_expire_current_peer_keypairs(peer);
if (send_staged_packets)
wg_packet_send_staged_packets(peer);
}
up_write(&wg->static_identity.lock);
}
skip_set_private_key:
if (info->attrs[WGDEVICE_A_PEERS]) {
struct nlattr *attr, *peer[WGPEER_A_MAX + 1];
int rem;
nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) {
ret = nla_parse_nested(peer, WGPEER_A_MAX, attr,
peer_policy, NULL);
if (ret < 0)
goto out;
ret = set_peer(wg, peer);
if (ret < 0)
goto out;
}
}
ret = 0;
out:
mutex_unlock(&wg->device_update_lock);
rtnl_unlock();
dev_put(wg->dev);
out_nodev:
if (info->attrs[WGDEVICE_A_PRIVATE_KEY])
memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]),
nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]));
return ret;
}
static const struct genl_ops genl_ops[] = {
{
.cmd = WG_CMD_GET_DEVICE,
.start = wg_get_device_start,
.dumpit = wg_get_device_dump,
.done = wg_get_device_done,
.flags = GENL_UNS_ADMIN_PERM
}, {
.cmd = WG_CMD_SET_DEVICE,
.doit = wg_set_device,
.flags = GENL_UNS_ADMIN_PERM
}
};
static struct genl_family genl_family __ro_after_init = {
.ops = genl_ops,
.n_ops = ARRAY_SIZE(genl_ops),
.resv_start_op = WG_CMD_SET_DEVICE + 1,
.name = WG_GENL_NAME,
.version = WG_GENL_VERSION,
.maxattr = WGDEVICE_A_MAX,
.module = THIS_MODULE,
.policy = device_policy,
.netnsok = true
};
int __init wg_genetlink_init(void)
{
return genl_register_family(&genl_family);
}
void __exit wg_genetlink_uninit(void)
{
genl_unregister_family(&genl_family);
}
|
linux-master
|
drivers/net/wireguard/netlink.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#include "cookie.h"
#include "peer.h"
#include "device.h"
#include "messages.h"
#include "ratelimiter.h"
#include "timers.h"
#include <crypto/blake2s.h>
#include <crypto/chacha20poly1305.h>
#include <net/ipv6.h>
#include <crypto/algapi.h>
void wg_cookie_checker_init(struct cookie_checker *checker,
struct wg_device *wg)
{
init_rwsem(&checker->secret_lock);
checker->secret_birthdate = ktime_get_coarse_boottime_ns();
get_random_bytes(checker->secret, NOISE_HASH_LEN);
checker->device = wg;
}
enum { COOKIE_KEY_LABEL_LEN = 8 };
static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----";
static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--";
static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN],
const u8 pubkey[NOISE_PUBLIC_KEY_LEN],
const u8 label[COOKIE_KEY_LABEL_LEN])
{
struct blake2s_state blake;
blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN);
blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN);
blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN);
blake2s_final(&blake, key);
}
/* Must hold peer->handshake.static_identity->lock */
void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker)
{
if (likely(checker->device->static_identity.has_identity)) {
precompute_key(checker->cookie_encryption_key,
checker->device->static_identity.static_public,
cookie_key_label);
precompute_key(checker->message_mac1_key,
checker->device->static_identity.static_public,
mac1_key_label);
} else {
memset(checker->cookie_encryption_key, 0,
NOISE_SYMMETRIC_KEY_LEN);
memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN);
}
}
void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer)
{
precompute_key(peer->latest_cookie.cookie_decryption_key,
peer->handshake.remote_static, cookie_key_label);
precompute_key(peer->latest_cookie.message_mac1_key,
peer->handshake.remote_static, mac1_key_label);
}
void wg_cookie_init(struct cookie *cookie)
{
memset(cookie, 0, sizeof(*cookie));
init_rwsem(&cookie->lock);
}
static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len,
const u8 key[NOISE_SYMMETRIC_KEY_LEN])
{
len = len - sizeof(struct message_macs) +
offsetof(struct message_macs, mac1);
blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN);
}
static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len,
const u8 cookie[COOKIE_LEN])
{
len = len - sizeof(struct message_macs) +
offsetof(struct message_macs, mac2);
blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN);
}
static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb,
struct cookie_checker *checker)
{
struct blake2s_state state;
if (wg_birthdate_has_expired(checker->secret_birthdate,
COOKIE_SECRET_MAX_AGE)) {
down_write(&checker->secret_lock);
checker->secret_birthdate = ktime_get_coarse_boottime_ns();
get_random_bytes(checker->secret, NOISE_HASH_LEN);
up_write(&checker->secret_lock);
}
down_read(&checker->secret_lock);
blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN);
if (skb->protocol == htons(ETH_P_IP))
blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr,
sizeof(struct in_addr));
else if (skb->protocol == htons(ETH_P_IPV6))
blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr,
sizeof(struct in6_addr));
blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16));
blake2s_final(&state, cookie);
up_read(&checker->secret_lock);
}
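/* The cookie itself is a keyed BLAKE2s hash of the sender's source IP
 * address and UDP source port under a random secret that is regenerated
 * once it is older than COOKIE_SECRET_MAX_AGE. A valid mac2 therefore
 * shows the initiator can receive packets at the claimed source address,
 * without the responder having to keep any per-sender state.
 */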
enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker,
struct sk_buff *skb,
bool check_cookie)
{
struct message_macs *macs = (struct message_macs *)
(skb->data + skb->len - sizeof(*macs));
enum cookie_mac_state ret;
u8 computed_mac[COOKIE_LEN];
u8 cookie[COOKIE_LEN];
ret = INVALID_MAC;
compute_mac1(computed_mac, skb->data, skb->len,
checker->message_mac1_key);
if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN))
goto out;
ret = VALID_MAC_BUT_NO_COOKIE;
if (!check_cookie)
goto out;
make_cookie(cookie, skb, checker);
compute_mac2(computed_mac, skb->data, skb->len, cookie);
if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN))
goto out;
ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED;
if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev)))
goto out;
ret = VALID_MAC_WITH_COOKIE;
out:
return ret;
}
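/* The return values form a ladder: INVALID_MAC (bad mac1),
 * VALID_MAC_BUT_NO_COOKIE (mac1 ok, cookie check not requested),
 * VALID_MAC_WITH_COOKIE_BUT_RATELIMITED (mac1 and mac2 ok but the source
 * exceeded its rate limit) and VALID_MAC_WITH_COOKIE (fully validated).
 * Under load a caller would typically only continue the handshake in the
 * last state and answer the others with a cookie reply or a drop.
 */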
void wg_cookie_add_mac_to_packet(void *message, size_t len,
struct wg_peer *peer)
{
struct message_macs *macs = (struct message_macs *)
((u8 *)message + len - sizeof(*macs));
down_write(&peer->latest_cookie.lock);
compute_mac1(macs->mac1, message, len,
peer->latest_cookie.message_mac1_key);
memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN);
peer->latest_cookie.have_sent_mac1 = true;
up_write(&peer->latest_cookie.lock);
down_read(&peer->latest_cookie.lock);
if (peer->latest_cookie.is_valid &&
!wg_birthdate_has_expired(peer->latest_cookie.birthdate,
COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY))
compute_mac2(macs->mac2, message, len,
peer->latest_cookie.cookie);
else
memset(macs->mac2, 0, COOKIE_LEN);
up_read(&peer->latest_cookie.lock);
}
void wg_cookie_message_create(struct message_handshake_cookie *dst,
struct sk_buff *skb, __le32 index,
struct cookie_checker *checker)
{
struct message_macs *macs = (struct message_macs *)
((u8 *)skb->data + skb->len - sizeof(*macs));
u8 cookie[COOKIE_LEN];
dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE);
dst->receiver_index = index;
get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN);
make_cookie(cookie, skb, checker);
xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN,
macs->mac1, COOKIE_LEN, dst->nonce,
checker->cookie_encryption_key);
}
void wg_cookie_message_consume(struct message_handshake_cookie *src,
struct wg_device *wg)
{
struct wg_peer *peer = NULL;
u8 cookie[COOKIE_LEN];
bool ret;
if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable,
INDEX_HASHTABLE_HANDSHAKE |
INDEX_HASHTABLE_KEYPAIR,
src->receiver_index, &peer)))
return;
down_read(&peer->latest_cookie.lock);
if (unlikely(!peer->latest_cookie.have_sent_mac1)) {
up_read(&peer->latest_cookie.lock);
goto out;
}
ret = xchacha20poly1305_decrypt(
cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie),
peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce,
peer->latest_cookie.cookie_decryption_key);
up_read(&peer->latest_cookie.lock);
if (ret) {
down_write(&peer->latest_cookie.lock);
memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN);
peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns();
peer->latest_cookie.is_valid = true;
peer->latest_cookie.have_sent_mac1 = false;
up_write(&peer->latest_cookie.lock);
} else {
net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n",
wg->dev->name);
}
out:
wg_peer_put(peer);
}
|
linux-master
|
drivers/net/wireguard/cookie.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*
* This contains some basic static unit tests for the allowedips data structure.
* It also has two additional modes that are disabled and meant to be used by
* folks directly playing with this file. If you define the macro
* DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
* memory, it will be printed out as KERN_DEBUG in a format that can be passed
* to graphviz (the dot command) to visualize it. If you define the macro
* DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
* randomized tests done against a trivial implementation, which may take
* upwards of a half-hour to complete. There's no set of users who should be
* enabling these, and the only developers that should go anywhere near these
 * knobs are the ones who are reading this comment.
*/
#ifdef DEBUG
#include <linux/siphash.h>
static __init void print_node(struct allowedips_node *node, u8 bits)
{
char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
u8 ip1[16], ip2[16], cidr1, cidr2;
char *style = "dotted";
u32 color = 0;
if (node == NULL)
return;
if (bits == 32) {
fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
} else if (bits == 128) {
fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
}
if (node->peer) {
hsiphash_key_t key = { { 0 } };
memcpy(&key, &node->peer, sizeof(node->peer));
color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
hsiphash_1u32(0xabad1dea, &key) % 200;
style = "bold";
}
wg_allowedips_read_node(node, ip1, &cidr1);
printk(fmt_declaration, ip1, cidr1, style, color);
if (node->bit[0]) {
wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
if (node->bit[1]) {
wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
if (node->bit[0])
print_node(rcu_dereference_raw(node->bit[0]), bits);
if (node->bit[1])
print_node(rcu_dereference_raw(node->bit[1]), bits);
}
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
{
printk(KERN_DEBUG "digraph trie {\n");
print_node(rcu_dereference_raw(top), bits);
printk(KERN_DEBUG "}\n");
}
enum {
NUM_PEERS = 2000,
NUM_RAND_ROUTES = 400,
NUM_MUTATED_ROUTES = 100,
NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};
struct horrible_allowedips {
struct hlist_head head;
};
struct horrible_allowedips_node {
struct hlist_node table;
union nf_inet_addr ip;
union nf_inet_addr mask;
u8 ip_version;
void *value;
};
static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
INIT_HLIST_HEAD(&table->head);
}
static __init void horrible_allowedips_free(struct horrible_allowedips *table)
{
struct horrible_allowedips_node *node;
struct hlist_node *h;
hlist_for_each_entry_safe(node, h, &table->head, table) {
hlist_del(&node->table);
kfree(node);
}
}
static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
union nf_inet_addr mask;
memset(&mask, 0, sizeof(mask));
memset(&mask.all, 0xff, cidr / 8);
if (cidr % 32)
mask.all[cidr / 32] = (__force u32)htonl(
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
return mask;
}
static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
{
return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
}
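/* Since a valid mask is a contiguous run of leading one bits, its
 * population count equals the prefix length: an IPv4 /24 mask of
 * 255.255.255.0 gives hweight32(0xffffff00) == 24, with the remaining
 * words contributing zero.
 */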
static __init inline void
horrible_mask_self(struct horrible_allowedips_node *node)
{
if (node->ip_version == 4) {
node->ip.ip &= node->mask.ip;
} else if (node->ip_version == 6) {
node->ip.ip6[0] &= node->mask.ip6[0];
node->ip.ip6[1] &= node->mask.ip6[1];
node->ip.ip6[2] &= node->mask.ip6[2];
node->ip.ip6[3] &= node->mask.ip6[3];
}
}
static __init inline bool
horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
return (ip->s_addr & node->mask.ip) == node->ip.ip;
}
static __init inline bool
horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
(ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
(ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
(ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
static __init void
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
struct horrible_allowedips_node *other = NULL, *where = NULL;
u8 my_cidr = horrible_mask_to_cidr(node->mask);
hlist_for_each_entry(other, &table->head, table) {
if (other->ip_version == node->ip_version &&
!memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
!memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
other->value = node->value;
kfree(node);
return;
}
}
hlist_for_each_entry(other, &table->head, table) {
where = other;
if (horrible_mask_to_cidr(other->mask) <= my_cidr)
break;
}
if (!other && !where)
hlist_add_head(&node->table, &table->head);
else if (!other)
hlist_add_behind(&node->table, &where->table);
else
hlist_add_before(&node->table, &where->table);
}
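/* The list is kept sorted by descending prefix length (exact duplicates
 * just have their value replaced), so the plain linear scans in
 * horrible_allowedips_lookup_v4/v6() below return the most specific
 * match first, i.e. the same longest-prefix-match semantics the real
 * trie is being checked against.
 */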
static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
struct in_addr *ip, u8 cidr, void *value)
{
struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
node->ip.in = *ip;
node->mask = horrible_cidr_to_mask(cidr);
node->ip_version = 4;
node->value = value;
horrible_mask_self(node);
horrible_insert_ordered(table, node);
return 0;
}
static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
struct in6_addr *ip, u8 cidr, void *value)
{
struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
node->ip.in6 = *ip;
node->mask = horrible_cidr_to_mask(cidr);
node->ip_version = 6;
node->value = value;
horrible_mask_self(node);
horrible_insert_ordered(table, node);
return 0;
}
static __init void *
horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
struct horrible_allowedips_node *node;
hlist_for_each_entry(node, &table->head, table) {
if (node->ip_version == 4 && horrible_match_v4(node, ip))
return node->value;
}
return NULL;
}
static __init void *
horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
struct horrible_allowedips_node *node;
hlist_for_each_entry(node, &table->head, table) {
if (node->ip_version == 6 && horrible_match_v6(node, ip))
return node->value;
}
return NULL;
}
static __init void
horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
{
struct horrible_allowedips_node *node;
struct hlist_node *h;
hlist_for_each_entry_safe(node, h, &table->head, table) {
if (node->value != value)
continue;
hlist_del(&node->table);
kfree(node);
}
}
static __init bool randomized_test(void)
{
unsigned int i, j, k, mutate_amount, cidr;
u8 ip[16], mutate_mask[16], mutated[16];
struct wg_peer **peers, *peer;
struct horrible_allowedips h;
DEFINE_MUTEX(mutex);
struct allowedips t;
bool ret = false;
mutex_init(&mutex);
wg_allowedips_init(&t);
horrible_allowedips_init(&h);
peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
if (unlikely(!peers)) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free;
}
for (i = 0; i < NUM_PEERS; ++i) {
peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
if (unlikely(!peers[i])) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free;
}
kref_init(&peers[i]->refcount);
INIT_LIST_HEAD(&peers[i]->allowedips_list);
}
mutex_lock(&mutex);
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
get_random_bytes(ip, 4);
cidr = get_random_u32_inclusive(1, 32);
peer = peers[get_random_u32_below(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
peer, &mutex) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
cidr, peer) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 4);
get_random_bytes(mutate_mask, 4);
mutate_amount = get_random_u32_below(32);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
mutate_mask[k] = 0xff
<< ((8 - (mutate_amount % 8)) % 8);
for (; k < 4; ++k)
mutate_mask[k] = 0;
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
get_random_u8());
cidr = get_random_u32_inclusive(1, 32);
peer = peers[get_random_u32_below(NUM_PEERS)];
if (wg_allowedips_insert_v4(&t,
(struct in_addr *)mutated,
cidr, peer, &mutex) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v4(&h,
(struct in_addr *)mutated, cidr, peer)) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
}
}
for (i = 0; i < NUM_RAND_ROUTES; ++i) {
get_random_bytes(ip, 16);
cidr = get_random_u32_inclusive(1, 128);
peer = peers[get_random_u32_below(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
peer, &mutex) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
cidr, peer) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
memcpy(mutated, ip, 16);
get_random_bytes(mutate_mask, 16);
mutate_amount = get_random_u32_below(128);
for (k = 0; k < mutate_amount / 8; ++k)
mutate_mask[k] = 0xff;
mutate_mask[k] = 0xff
<< ((8 - (mutate_amount % 8)) % 8);
for (; k < 4; ++k)
mutate_mask[k] = 0;
for (k = 0; k < 4; ++k)
mutated[k] = (mutated[k] & mutate_mask[k]) |
(~mutate_mask[k] &
get_random_u8());
cidr = get_random_u32_inclusive(1, 128);
peer = peers[get_random_u32_below(NUM_PEERS)];
if (wg_allowedips_insert_v6(&t,
(struct in6_addr *)mutated,
cidr, peer, &mutex) < 0) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v6(
&h, (struct in6_addr *)mutated, cidr,
peer)) {
pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
}
}
mutex_unlock(&mutex);
if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
print_tree(t.root4, 32);
print_tree(t.root6, 128);
}
for (j = 0;; ++j) {
for (i = 0; i < NUM_QUERIES; ++i) {
get_random_bytes(ip, 4);
if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
pr_err("allowedips random v4 self-test: FAIL\n");
goto free;
}
get_random_bytes(ip, 16);
if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
pr_err("allowedips random v6 self-test: FAIL\n");
goto free;
}
}
if (j >= NUM_PEERS)
break;
mutex_lock(&mutex);
wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
mutex_unlock(&mutex);
horrible_allowedips_remove_by_value(&h, peers[j]);
}
if (t.root4 || t.root6) {
pr_err("allowedips random self-test removal: FAIL\n");
goto free;
}
ret = true;
free:
mutex_lock(&mutex);
free_locked:
wg_allowedips_free(&t, &mutex);
mutex_unlock(&mutex);
horrible_allowedips_free(&h);
if (peers) {
for (i = 0; i < NUM_PEERS; ++i)
kfree(peers[i]);
}
kfree(peers);
return ret;
}
static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
{
static struct in_addr ip;
u8 *split = (u8 *)&ip;
split[0] = a;
split[1] = b;
split[2] = c;
split[3] = d;
return &ip;
}
static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
{
static struct in6_addr ip;
__be32 *split = (__be32 *)&ip;
split[0] = cpu_to_be32(a);
split[1] = cpu_to_be32(b);
split[2] = cpu_to_be32(c);
split[3] = cpu_to_be32(d);
return &ip;
}
static __init struct wg_peer *init_peer(void)
{
struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
return NULL;
kref_init(&peer->refcount);
INIT_LIST_HEAD(&peer->allowedips_list);
return peer;
}
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \
wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
cidr, mem, &mutex)
#define maybe_fail() do { \
++i; \
if (!_s) { \
pr_info("allowedips self-test %zu: FAIL\n", i); \
success = false; \
} \
} while (0)
#define test(version, mem, ipa, ipb, ipc, ipd) do { \
bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
ip##version(ipa, ipb, ipc, ipd)) == (mem); \
maybe_fail(); \
} while (0)
#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \
bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
ip##version(ipa, ipb, ipc, ipd)) != (mem); \
maybe_fail(); \
} while (0)
#define test_boolean(cond) do { \
bool _s = (cond); \
maybe_fail(); \
} while (0)
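/* For readability, a rough expansion of the helpers above:
 * test(4, a, 192, 168, 4, 20) becomes approximately
 *
 *	do {
 *		bool _s = lookup(t.root4, 32, ip4(192, 168, 4, 20)) == (a);
 *		++i;
 *		if (!_s) {
 *			pr_info("allowedips self-test %zu: FAIL\n", i);
 *			success = false;
 *		}
 *	} while (0);
 *
 * so each test() line below asserts that a longest-prefix lookup of the
 * given address resolves to the expected peer, and test_negative() the
 * opposite.
 */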
bool __init wg_allowedips_selftest(void)
{
bool found_a = false, found_b = false, found_c = false, found_d = false,
found_e = false, found_other = false;
struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
*d = init_peer(), *e = init_peer(), *f = init_peer(),
*g = init_peer(), *h = init_peer();
struct allowedips_node *iter_node;
bool success = false;
struct allowedips t;
DEFINE_MUTEX(mutex);
struct in6_addr ip;
size_t i = 0, count = 0;
__be64 part;
mutex_init(&mutex);
mutex_lock(&mutex);
wg_allowedips_init(&t);
if (!a || !b || !c || !d || !e || !f || !g || !h) {
pr_err("allowedips self-test malloc: FAIL\n");
goto free;
}
insert(4, a, 192, 168, 4, 0, 24);
insert(4, b, 192, 168, 4, 4, 32);
insert(4, c, 192, 168, 0, 0, 16);
insert(4, d, 192, 95, 5, 64, 27);
/* replaces previous entry, and maskself is required */
insert(4, c, 192, 95, 5, 65, 27);
insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
insert(4, e, 0, 0, 0, 0, 0);
insert(6, e, 0, 0, 0, 0, 0);
/* replaces previous entry */
insert(6, f, 0, 0, 0, 0, 0);
insert(6, g, 0x24046800, 0, 0, 0, 32);
/* maskself is required */
insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
insert(4, g, 64, 15, 112, 0, 20);
/* maskself is required */
insert(4, h, 64, 15, 123, 211, 25);
insert(4, a, 10, 0, 0, 0, 25);
insert(4, b, 10, 0, 0, 128, 25);
insert(4, a, 10, 1, 0, 0, 30);
insert(4, b, 10, 1, 0, 4, 30);
insert(4, c, 10, 1, 0, 8, 29);
insert(4, d, 10, 1, 0, 16, 29);
if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
print_tree(t.root4, 32);
print_tree(t.root6, 128);
}
success = true;
test(4, a, 192, 168, 4, 20);
test(4, a, 192, 168, 4, 0);
test(4, b, 192, 168, 4, 4);
test(4, c, 192, 168, 200, 182);
test(4, c, 192, 95, 5, 68);
test(4, e, 192, 95, 5, 96);
test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
test(6, f, 0x26075300, 0x60006b01, 0, 0);
test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
test(6, h, 0x24046800, 0x40040800, 0, 0);
test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
test(4, g, 64, 15, 116, 26);
test(4, g, 64, 15, 127, 3);
test(4, g, 64, 15, 123, 1);
test(4, h, 64, 15, 123, 128);
test(4, h, 64, 15, 123, 129);
test(4, a, 10, 0, 0, 52);
test(4, b, 10, 0, 0, 220);
test(4, a, 10, 1, 0, 2);
test(4, b, 10, 1, 0, 6);
test(4, c, 10, 1, 0, 10);
test(4, d, 10, 1, 0, 20);
insert(4, a, 1, 0, 0, 0, 32);
insert(4, a, 64, 0, 0, 0, 32);
insert(4, a, 128, 0, 0, 0, 32);
insert(4, a, 192, 0, 0, 0, 32);
insert(4, a, 255, 0, 0, 0, 32);
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 1, 0, 0, 0);
test_negative(4, a, 64, 0, 0, 0);
test_negative(4, a, 128, 0, 0, 0);
test_negative(4, a, 192, 0, 0, 0);
test_negative(4, a, 255, 0, 0, 0);
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
insert(4, a, 192, 168, 0, 0, 16);
insert(4, a, 192, 168, 0, 0, 24);
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
/* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
* if something goes wrong.
*/
for (i = 0; i < 64; ++i) {
part = cpu_to_be64(~0LLU << i);
memset(&ip, 0xff, 8);
memcpy((u8 *)&ip + 8, &part, 8);
wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
memcpy(&ip, &part, 8);
memset((u8 *)&ip + 8, 0, 8);
wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
}
memset(&ip, 0, 16);
wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
insert(4, a, 192, 95, 5, 93, 27);
insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
insert(4, a, 10, 1, 0, 20, 29);
insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
u8 cidr, ip[16] __aligned(__alignof(u64));
int family = wg_allowedips_read_node(iter_node, ip, &cidr);
count++;
if (cidr == 27 && family == AF_INET &&
!memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
found_a = true;
else if (cidr == 128 && family == AF_INET6 &&
!memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
sizeof(struct in6_addr)))
found_b = true;
else if (cidr == 29 && family == AF_INET &&
!memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
found_c = true;
else if (cidr == 83 && family == AF_INET6 &&
!memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
sizeof(struct in6_addr)))
found_d = true;
else if (cidr == 21 && family == AF_INET6 &&
!memcmp(ip, ip6(0x26075000, 0, 0, 0),
sizeof(struct in6_addr)))
found_e = true;
else
found_other = true;
}
test_boolean(count == 5);
test_boolean(found_a);
test_boolean(found_b);
test_boolean(found_c);
test_boolean(found_d);
test_boolean(found_e);
test_boolean(!found_other);
if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
success = randomized_test();
if (success)
pr_info("allowedips self-tests: pass\n");
free:
wg_allowedips_free(&t, &mutex);
kfree(a);
kfree(b);
kfree(c);
kfree(d);
kfree(e);
kfree(f);
kfree(g);
kfree(h);
mutex_unlock(&mutex);
return success;
}
#undef test_negative
#undef test
#undef remove
#undef insert
#undef init_peer
#endif
|
linux-master
|
drivers/net/wireguard/selftest/allowedips.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#ifdef DEBUG
bool __init wg_packet_counter_selftest(void)
{
struct noise_replay_counter *counter;
unsigned int test_num = 0, i;
bool success = true;
counter = kmalloc(sizeof(*counter), GFP_KERNEL);
if (unlikely(!counter)) {
pr_err("nonce counter self-test malloc: FAIL\n");
return false;
}
#define T_INIT do { \
memset(counter, 0, sizeof(*counter)); \
spin_lock_init(&counter->lock); \
} while (0)
#define T_LIM (COUNTER_WINDOW_SIZE + 1)
#define T(n, v) do { \
++test_num; \
if (counter_validate(counter, n) != (v)) { \
pr_err("nonce counter self-test %u: FAIL\n", \
test_num); \
success = false; \
} \
} while (0)
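/* The numbered cases below expect counter_validate() to accept a counter
 * that advances past everything seen so far, or one that falls inside
 * the trailing replay window and has not been seen before, and to reject
 * duplicates, counters lagging the current maximum by T_LIM
 * (COUNTER_WINDOW_SIZE + 1) or more, and anything at or above
 * REJECT_AFTER_MESSAGES.
 */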
T_INIT;
/* 1 */ T(0, true);
/* 2 */ T(1, true);
/* 3 */ T(1, false);
/* 4 */ T(9, true);
/* 5 */ T(8, true);
/* 6 */ T(7, true);
/* 7 */ T(7, false);
/* 8 */ T(T_LIM, true);
/* 9 */ T(T_LIM - 1, true);
/* 10 */ T(T_LIM - 1, false);
/* 11 */ T(T_LIM - 2, true);
/* 12 */ T(2, true);
/* 13 */ T(2, false);
/* 14 */ T(T_LIM + 16, true);
/* 15 */ T(3, false);
/* 16 */ T(T_LIM + 16, false);
/* 17 */ T(T_LIM * 4, true);
/* 18 */ T(T_LIM * 4 - (T_LIM - 1), true);
/* 19 */ T(10, false);
/* 20 */ T(T_LIM * 4 - T_LIM, false);
/* 21 */ T(T_LIM * 4 - (T_LIM + 1), false);
/* 22 */ T(T_LIM * 4 - (T_LIM - 2), true);
/* 23 */ T(T_LIM * 4 + 1 - T_LIM, false);
/* 24 */ T(0, false);
/* 25 */ T(REJECT_AFTER_MESSAGES, false);
/* 26 */ T(REJECT_AFTER_MESSAGES - 1, true);
/* 27 */ T(REJECT_AFTER_MESSAGES, false);
/* 28 */ T(REJECT_AFTER_MESSAGES - 1, false);
/* 29 */ T(REJECT_AFTER_MESSAGES - 2, true);
/* 30 */ T(REJECT_AFTER_MESSAGES + 1, false);
/* 31 */ T(REJECT_AFTER_MESSAGES + 2, false);
/* 32 */ T(REJECT_AFTER_MESSAGES - 2, false);
/* 33 */ T(REJECT_AFTER_MESSAGES - 3, true);
/* 34 */ T(0, false);
T_INIT;
for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i)
T(i, true);
T(0, true);
T(0, false);
T_INIT;
for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i)
T(i, true);
T(1, true);
T(0, false);
T_INIT;
for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;)
T(i, true);
T_INIT;
for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;)
T(i, true);
T(0, false);
T_INIT;
for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
T(i, true);
T(COUNTER_WINDOW_SIZE + 1, true);
T(0, false);
T_INIT;
for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;)
T(i, true);
T(0, true);
T(COUNTER_WINDOW_SIZE + 1, true);
#undef T
#undef T_LIM
#undef T_INIT
if (success)
pr_info("nonce counter self-tests: pass\n");
kfree(counter);
return success;
}
#endif
|
linux-master
|
drivers/net/wireguard/selftest/counter.c
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
*/
#ifdef DEBUG
#include <linux/jiffies.h>
static const struct {
bool result;
unsigned int msec_to_sleep_before;
} expected_results[] __initconst = {
[0 ... PACKETS_BURSTABLE - 1] = { true, 0 },
[PACKETS_BURSTABLE] = { false, 0 },
[PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND },
[PACKETS_BURSTABLE + 2] = { false, 0 },
[PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 },
[PACKETS_BURSTABLE + 4] = { true, 0 },
[PACKETS_BURSTABLE + 5] = { false, 0 }
};
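/* The table encodes token-bucket behaviour: the first PACKETS_BURSTABLE
 * packets from one source pass immediately, the next one is dropped, and
 * each sleep of MSEC_PER_SEC / PACKETS_PER_SECOND replenishes exactly
 * one packet worth of credit (two sleeps, two packets, and so on).
 * maximum_jiffies_at_index() below grants an extra two thirds of one
 * refill interval of slack; past that, timings_test() returns -ETIMEDOUT
 * and the whole run is retried rather than reported as a failure.
 */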
static __init unsigned int maximum_jiffies_at_index(int index)
{
unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3;
int i;
for (i = 0; i <= index; ++i)
total_msecs += expected_results[i].msec_to_sleep_before;
return msecs_to_jiffies(total_msecs);
}
static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4,
struct sk_buff *skb6, struct ipv6hdr *hdr6,
int *test)
{
unsigned long loop_start_time;
int i;
wg_ratelimiter_gc_entries(NULL);
rcu_barrier();
loop_start_time = jiffies;
for (i = 0; i < ARRAY_SIZE(expected_results); ++i) {
if (expected_results[i].msec_to_sleep_before)
msleep(expected_results[i].msec_to_sleep_before);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
return -ETIMEDOUT;
if (wg_ratelimiter_allow(skb4, &init_net) !=
expected_results[i].result)
return -EXFULL;
++(*test);
hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
return -ETIMEDOUT;
if (!wg_ratelimiter_allow(skb4, &init_net))
return -EXFULL;
++(*test);
hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1);
#if IS_ENABLED(CONFIG_IPV6)
hdr6->saddr.in6_u.u6_addr32[2] = htonl(i);
hdr6->saddr.in6_u.u6_addr32[3] = htonl(i);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
return -ETIMEDOUT;
if (wg_ratelimiter_allow(skb6, &init_net) !=
expected_results[i].result)
return -EXFULL;
++(*test);
hdr6->saddr.in6_u.u6_addr32[0] =
htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
return -ETIMEDOUT;
if (!wg_ratelimiter_allow(skb6, &init_net))
return -EXFULL;
++(*test);
hdr6->saddr.in6_u.u6_addr32[0] =
htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1);
if (time_is_before_jiffies(loop_start_time +
maximum_jiffies_at_index(i)))
return -ETIMEDOUT;
#endif
}
return 0;
}
static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4,
int *test)
{
int i;
wg_ratelimiter_gc_entries(NULL);
rcu_barrier();
if (atomic_read(&total_entries))
return -EXFULL;
++(*test);
for (i = 0; i <= max_entries; ++i) {
hdr4->saddr = htonl(i);
if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries))
return -EXFULL;
++(*test);
}
return 0;
}
bool __init wg_ratelimiter_selftest(void)
{
enum { TRIALS_BEFORE_GIVING_UP = 5000 };
bool success = false;
int test = 0, trials;
struct sk_buff *skb4, *skb6 = NULL;
struct iphdr *hdr4;
struct ipv6hdr *hdr6 = NULL;
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0);
if (wg_ratelimiter_init())
goto out;
++test;
if (wg_ratelimiter_init()) {
wg_ratelimiter_uninit();
goto out;
}
++test;
if (wg_ratelimiter_init()) {
wg_ratelimiter_uninit();
wg_ratelimiter_uninit();
goto out;
}
++test;
skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL);
if (unlikely(!skb4))
goto err_nofree;
skb4->protocol = htons(ETH_P_IP);
hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4));
hdr4->saddr = htonl(8182);
skb_reset_network_header(skb4);
++test;
#if IS_ENABLED(CONFIG_IPV6)
skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL);
if (unlikely(!skb6)) {
kfree_skb(skb4);
goto err_nofree;
}
skb6->protocol = htons(ETH_P_IPV6);
hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6));
hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212);
hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188);
skb_reset_network_header(skb6);
++test;
#endif
for (trials = TRIALS_BEFORE_GIVING_UP; IS_ENABLED(DEBUG_RATELIMITER_TIMINGS);) {
int test_count = 0, ret;
ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count);
if (ret == -ETIMEDOUT) {
if (!trials--) {
test += test_count;
goto err;
}
continue;
} else if (ret < 0) {
test += test_count;
goto err;
} else {
test += test_count;
break;
}
}
for (trials = TRIALS_BEFORE_GIVING_UP;;) {
int test_count = 0;
if (capacity_test(skb4, hdr4, &test_count) < 0) {
if (!trials--) {
test += test_count;
goto err;
}
continue;
}
test += test_count;
break;
}
success = true;
err:
kfree_skb(skb4);
#if IS_ENABLED(CONFIG_IPV6)
kfree_skb(skb6);
#endif
err_nofree:
wg_ratelimiter_uninit();
wg_ratelimiter_uninit();
wg_ratelimiter_uninit();
/* Uninit one extra time to check underflow detection. */
wg_ratelimiter_uninit();
out:
if (success)
pr_info("ratelimiter self-tests: pass\n");
else
pr_err("ratelimiter self-test %d: FAIL\n", test);
return success;
}
#endif
|
linux-master
|
drivers/net/wireguard/selftest/ratelimiter.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <[email protected]> */
#include <linux/kernel.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/wwan.h>
/* MHI wwan flags */
enum mhi_wwan_flags {
MHI_WWAN_DL_CAP,
MHI_WWAN_UL_CAP,
MHI_WWAN_RX_REFILL,
};
#define MHI_WWAN_MAX_MTU 0x8000
struct mhi_wwan_dev {
/* Lower level is a mhi dev, upper level is a wwan port */
struct mhi_device *mhi_dev;
struct wwan_port *wwan_port;
/* State and capabilities */
unsigned long flags;
size_t mtu;
/* Protect against concurrent TX and TX-completion (bh) */
spinlock_t tx_lock;
/* Protect RX budget and rx_refill scheduling */
spinlock_t rx_lock;
struct work_struct rx_refill;
/* RX budget is initially set to the size of the MHI RX queue and is
* used to limit the number of allocated and queued packets. It is
* decremented on data queueing and incremented on data release.
*/
unsigned int rx_budget;
};
/* Increment RX budget and schedule RX refill if necessary */
static void mhi_wwan_rx_budget_inc(struct mhi_wwan_dev *mhiwwan)
{
spin_lock_bh(&mhiwwan->rx_lock);
mhiwwan->rx_budget++;
if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
schedule_work(&mhiwwan->rx_refill);
spin_unlock_bh(&mhiwwan->rx_lock);
}
/* Decrement RX budget if non-zero and return true on success */
static bool mhi_wwan_rx_budget_dec(struct mhi_wwan_dev *mhiwwan)
{
bool ret = false;
spin_lock_bh(&mhiwwan->rx_lock);
if (mhiwwan->rx_budget) {
mhiwwan->rx_budget--;
if (test_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags))
ret = true;
}
spin_unlock_bh(&mhiwwan->rx_lock);
return ret;
}
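/* RX budget lifecycle: mhi_wwan_ctrl_start() seeds rx_budget with the
 * number of free MHI RX descriptors, the refill worker decrements it for
 * every buffer it queues to the device, and __mhi_skb_destructor()
 * increments it again once the WWAN core consumer releases the skb,
 * rescheduling the refill work. At most one queue depth worth of RX
 * buffers is therefore allocated and outstanding at any time.
 */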
static void __mhi_skb_destructor(struct sk_buff *skb)
{
/* RX buffer has been consumed, increase the allowed budget */
mhi_wwan_rx_budget_inc(skb_shinfo(skb)->destructor_arg);
}
static void mhi_wwan_ctrl_refill_work(struct work_struct *work)
{
struct mhi_wwan_dev *mhiwwan = container_of(work, struct mhi_wwan_dev, rx_refill);
struct mhi_device *mhi_dev = mhiwwan->mhi_dev;
while (mhi_wwan_rx_budget_dec(mhiwwan)) {
struct sk_buff *skb;
skb = alloc_skb(mhiwwan->mtu, GFP_KERNEL);
if (!skb) {
mhi_wwan_rx_budget_inc(mhiwwan);
break;
}
		/* To prevent unlimited buffer allocation if nothing consumes
		 * the RX buffers (passed to WWAN core), track their lifespan
		 * so that no more than the allowed budget is ever allocated.
		 */
skb->destructor = __mhi_skb_destructor;
skb_shinfo(skb)->destructor_arg = mhiwwan;
if (mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, mhiwwan->mtu, MHI_EOT)) {
dev_err(&mhi_dev->dev, "Failed to queue buffer\n");
kfree_skb(skb);
break;
}
}
}
static int mhi_wwan_ctrl_start(struct wwan_port *port)
{
struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
int ret;
/* Start mhi device's channel(s) */
ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
if (ret)
return ret;
/* Don't allocate more buffers than MHI channel queue size */
mhiwwan->rx_budget = mhi_get_free_desc_count(mhiwwan->mhi_dev, DMA_FROM_DEVICE);
/* Add buffers to the MHI inbound queue */
if (test_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags)) {
set_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
mhi_wwan_ctrl_refill_work(&mhiwwan->rx_refill);
}
return 0;
}
static void mhi_wwan_ctrl_stop(struct wwan_port *port)
{
struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
spin_lock_bh(&mhiwwan->rx_lock);
clear_bit(MHI_WWAN_RX_REFILL, &mhiwwan->flags);
spin_unlock_bh(&mhiwwan->rx_lock);
cancel_work_sync(&mhiwwan->rx_refill);
mhi_unprepare_from_transfer(mhiwwan->mhi_dev);
}
static int mhi_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct mhi_wwan_dev *mhiwwan = wwan_port_get_drvdata(port);
int ret;
if (skb->len > mhiwwan->mtu)
return -EMSGSIZE;
if (!test_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags))
return -EOPNOTSUPP;
/* Queue the packet for MHI transfer and check fullness of the queue */
spin_lock_bh(&mhiwwan->tx_lock);
ret = mhi_queue_skb(mhiwwan->mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
if (mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
wwan_port_txoff(port);
spin_unlock_bh(&mhiwwan->tx_lock);
return ret;
}
static const struct wwan_port_ops wwan_pops = {
.start = mhi_wwan_ctrl_start,
.stop = mhi_wwan_ctrl_stop,
.tx = mhi_wwan_ctrl_tx,
};
static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
struct wwan_port *port = mhiwwan->wwan_port;
struct sk_buff *skb = mhi_result->buf_addr;
dev_dbg(&mhi_dev->dev, "%s: status: %d xfer_len: %zu\n", __func__,
mhi_result->transaction_status, mhi_result->bytes_xferd);
	/* MHI core is done with the buffer, release it */
consume_skb(skb);
	/* There is likely a new slot available in the MHI queue, re-allow TX */
spin_lock_bh(&mhiwwan->tx_lock);
if (!mhi_queue_is_full(mhiwwan->mhi_dev, DMA_TO_DEVICE))
wwan_port_txon(port);
spin_unlock_bh(&mhiwwan->tx_lock);
}
static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
struct mhi_result *mhi_result)
{
struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
struct wwan_port *port = mhiwwan->wwan_port;
struct sk_buff *skb = mhi_result->buf_addr;
dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
mhi_result->transaction_status, mhi_result->bytes_xferd);
if (mhi_result->transaction_status &&
mhi_result->transaction_status != -EOVERFLOW) {
kfree_skb(skb);
return;
}
	/* MHI core does not update skb->len, do it before forwarding */
skb_put(skb, mhi_result->bytes_xferd);
wwan_port_rx(port, skb);
/* Do not increment rx budget nor refill RX buffers now, wait for the
* buffer to be consumed. Done from __mhi_skb_destructor().
*/
}
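/* RX path summary: the filled buffer is handed to the WWAN core via
 * wwan_port_rx() while still carrying __mhi_skb_destructor() as its
 * destructor, so only when the port reader frees the skb does the budget
 * grow back and the refill work queue a replacement buffer to MHI.
 */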
static int mhi_wwan_ctrl_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_wwan_dev *mhiwwan;
struct wwan_port *port;
mhiwwan = kzalloc(sizeof(*mhiwwan), GFP_KERNEL);
if (!mhiwwan)
return -ENOMEM;
mhiwwan->mhi_dev = mhi_dev;
mhiwwan->mtu = MHI_WWAN_MAX_MTU;
INIT_WORK(&mhiwwan->rx_refill, mhi_wwan_ctrl_refill_work);
spin_lock_init(&mhiwwan->tx_lock);
spin_lock_init(&mhiwwan->rx_lock);
if (mhi_dev->dl_chan)
set_bit(MHI_WWAN_DL_CAP, &mhiwwan->flags);
if (mhi_dev->ul_chan)
set_bit(MHI_WWAN_UL_CAP, &mhiwwan->flags);
dev_set_drvdata(&mhi_dev->dev, mhiwwan);
/* Register as a wwan port, id->driver_data contains wwan port type */
port = wwan_create_port(&cntrl->mhi_dev->dev, id->driver_data,
&wwan_pops, NULL, mhiwwan);
if (IS_ERR(port)) {
kfree(mhiwwan);
return PTR_ERR(port);
}
mhiwwan->wwan_port = port;
return 0;
}
static void mhi_wwan_ctrl_remove(struct mhi_device *mhi_dev)
{
struct mhi_wwan_dev *mhiwwan = dev_get_drvdata(&mhi_dev->dev);
wwan_remove_port(mhiwwan->wwan_port);
kfree(mhiwwan);
}
static const struct mhi_device_id mhi_wwan_ctrl_match_table[] = {
{ .chan = "DUN", .driver_data = WWAN_PORT_AT },
{ .chan = "DUN2", .driver_data = WWAN_PORT_AT },
{ .chan = "MBIM", .driver_data = WWAN_PORT_MBIM },
{ .chan = "QMI", .driver_data = WWAN_PORT_QMI },
{ .chan = "DIAG", .driver_data = WWAN_PORT_QCDM },
{ .chan = "FIREHOSE", .driver_data = WWAN_PORT_FIREHOSE },
{},
};
MODULE_DEVICE_TABLE(mhi, mhi_wwan_ctrl_match_table);
static struct mhi_driver mhi_wwan_ctrl_driver = {
.id_table = mhi_wwan_ctrl_match_table,
.remove = mhi_wwan_ctrl_remove,
.probe = mhi_wwan_ctrl_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
.driver = {
.name = "mhi_wwan_ctrl",
},
};
module_mhi_driver(mhi_wwan_ctrl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI WWAN CTRL Driver");
MODULE_AUTHOR("Loic Poulain <[email protected]>");
|
linux-master
|
drivers/net/wwan/mhi_wwan_ctrl.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* WWAN device simulator for WWAN framework testing.
*
* Copyright (c) 2021, Sergey Ryazanov <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wwan.h>
#include <linux/debugfs.h>
#include <linux/workqueue.h>
#include <net/arp.h>
static int wwan_hwsim_devsnum = 2;
module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
MODULE_PARM_DESC(devices, "Number of simulated devices");
static struct class *wwan_hwsim_class;
static struct dentry *wwan_hwsim_debugfs_topdir;
static struct dentry *wwan_hwsim_debugfs_devcreate;
static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
static LIST_HEAD(wwan_hwsim_devs);
static unsigned int wwan_hwsim_dev_idx;
static struct workqueue_struct *wwan_wq;
struct wwan_hwsim_dev {
struct list_head list;
unsigned int id;
struct device dev;
struct work_struct del_work;
struct dentry *debugfs_topdir;
struct dentry *debugfs_portcreate;
spinlock_t ports_lock; /* Serialize ports creation/deletion */
unsigned int port_idx;
struct list_head ports;
};
struct wwan_hwsim_port {
struct list_head list;
unsigned int id;
struct wwan_hwsim_dev *dev;
struct wwan_port *wwan;
struct work_struct del_work;
struct dentry *debugfs_topdir;
enum { /* AT command parser state */
AT_PARSER_WAIT_A,
AT_PARSER_WAIT_T,
AT_PARSER_WAIT_TERM,
AT_PARSER_SKIP_LINE,
} pstate;
};
static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops;
static const struct file_operations wwan_hwsim_debugfs_portcreate_fops;
static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops;
static void wwan_hwsim_port_del_work(struct work_struct *work);
static void wwan_hwsim_dev_del_work(struct work_struct *work);
static netdev_tx_t wwan_hwsim_netdev_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
consume_skb(skb);
return NETDEV_TX_OK;
}
static const struct net_device_ops wwan_hwsim_netdev_ops = {
.ndo_start_xmit = wwan_hwsim_netdev_xmit,
};
static void wwan_hwsim_netdev_setup(struct net_device *ndev)
{
ndev->netdev_ops = &wwan_hwsim_netdev_ops;
ndev->needs_free_netdev = true;
ndev->mtu = ETH_DATA_LEN;
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = ETH_MAX_MTU;
ndev->type = ARPHRD_NONE;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
}
static const struct wwan_ops wwan_hwsim_wwan_rtnl_ops = {
.priv_size = 0, /* No private data */
.setup = wwan_hwsim_netdev_setup,
};
static int wwan_hwsim_port_start(struct wwan_port *wport)
{
struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
port->pstate = AT_PARSER_WAIT_A;
return 0;
}
static void wwan_hwsim_port_stop(struct wwan_port *wport)
{
}
/* Implements a minimalistic AT command parser that echoes input back and
 * replies with 'OK' to each input command. See AT command protocol details
 * in the ITU-T V.250 recommendation.
 *
 * Be aware that this processor is not fully V.250 compliant.
 */
static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
{
struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
struct sk_buff *out;
int i, n, s;
	/* Estimate the maximum possible number of commands by counting the
	 * termination chars (S3 param, CR by default), then allocate an
	 * output buffer large enough to fit the echo and result codes of
	 * all commands.
	 */
for (i = 0, n = 0; i < in->len; ++i)
if (in->data[i] == '\r')
n++;
n = in->len + n * (2 + 2 + 2); /* Output buffer size */
out = alloc_skb(n, GFP_KERNEL);
if (!out)
return -ENOMEM;
for (i = 0, s = 0; i < in->len; ++i) {
char c = in->data[i];
if (port->pstate == AT_PARSER_WAIT_A) {
if (c == 'A' || c == 'a')
port->pstate = AT_PARSER_WAIT_T;
			else if (c != '\n') /* Ignore formatting char */
port->pstate = AT_PARSER_SKIP_LINE;
} else if (port->pstate == AT_PARSER_WAIT_T) {
if (c == 'T' || c == 't')
port->pstate = AT_PARSER_WAIT_TERM;
else
port->pstate = AT_PARSER_SKIP_LINE;
} else if (port->pstate == AT_PARSER_WAIT_TERM) {
if (c != '\r')
continue;
/* Consume the trailing formatting char as well */
if ((i + 1) < in->len && in->data[i + 1] == '\n')
i++;
n = i - s + 1;
skb_put_data(out, &in->data[s], n);/* Echo */
skb_put_data(out, "\r\nOK\r\n", 6);
s = i + 1;
port->pstate = AT_PARSER_WAIT_A;
} else if (port->pstate == AT_PARSER_SKIP_LINE) {
if (c != '\r')
continue;
port->pstate = AT_PARSER_WAIT_A;
}
}
if (i > s) {
/* Echo the processed portion of a not yet completed command */
n = i - s;
skb_put_data(out, &in->data[s], n);
}
consume_skb(in);
wwan_port_rx(wport, out);
return 0;
}
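/* Worked example: writing the bytes "ATI\r\n" to the port echoes the
 * command and appends the final result code, so the skb handed back via
 * wwan_port_rx() contains "ATI\r\n\r\nOK\r\n". A line not starting with
 * "AT"/"at" gets no result code (the parser skips to the next CR), and a
 * command split across writes is handled because port->pstate persists
 * between calls, with the partial tail echoed immediately.
 */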
static const struct wwan_port_ops wwan_hwsim_port_ops = {
.start = wwan_hwsim_port_start,
.stop = wwan_hwsim_port_stop,
.tx = wwan_hwsim_port_tx,
};
static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
{
struct wwan_hwsim_port *port;
char name[0x10];
int err;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
port->dev = dev;
spin_lock(&dev->ports_lock);
port->id = dev->port_idx++;
spin_unlock(&dev->ports_lock);
port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
&wwan_hwsim_port_ops,
NULL, port);
if (IS_ERR(port->wwan)) {
err = PTR_ERR(port->wwan);
goto err_free_port;
}
INIT_WORK(&port->del_work, wwan_hwsim_port_del_work);
snprintf(name, sizeof(name), "port%u", port->id);
port->debugfs_topdir = debugfs_create_dir(name, dev->debugfs_topdir);
debugfs_create_file("destroy", 0200, port->debugfs_topdir, port,
&wwan_hwsim_debugfs_portdestroy_fops);
return port;
err_free_port:
kfree(port);
return ERR_PTR(err);
}
static void wwan_hwsim_port_del(struct wwan_hwsim_port *port)
{
debugfs_remove(port->debugfs_topdir);
/* Make sure that there is no pending deletion work */
if (current_work() != &port->del_work)
cancel_work_sync(&port->del_work);
wwan_remove_port(port->wwan);
kfree(port);
}
static void wwan_hwsim_port_del_work(struct work_struct *work)
{
struct wwan_hwsim_port *port =
container_of(work, typeof(*port), del_work);
struct wwan_hwsim_dev *dev = port->dev;
spin_lock(&dev->ports_lock);
if (list_empty(&port->list)) {
/* Someone else deleting port at the moment */
spin_unlock(&dev->ports_lock);
return;
}
list_del_init(&port->list);
spin_unlock(&dev->ports_lock);
wwan_hwsim_port_del(port);
}
static void wwan_hwsim_dev_release(struct device *sysdev)
{
struct wwan_hwsim_dev *dev = container_of(sysdev, typeof(*dev), dev);
kfree(dev);
}
static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
{
struct wwan_hwsim_dev *dev;
int err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
spin_lock(&wwan_hwsim_devs_lock);
dev->id = wwan_hwsim_dev_idx++;
spin_unlock(&wwan_hwsim_devs_lock);
dev->dev.release = wwan_hwsim_dev_release;
dev->dev.class = wwan_hwsim_class;
dev_set_name(&dev->dev, "hwsim%u", dev->id);
spin_lock_init(&dev->ports_lock);
INIT_LIST_HEAD(&dev->ports);
err = device_register(&dev->dev);
if (err)
goto err_free_dev;
INIT_WORK(&dev->del_work, wwan_hwsim_dev_del_work);
err = wwan_register_ops(&dev->dev, &wwan_hwsim_wwan_rtnl_ops, dev, 1);
if (err)
goto err_unreg_dev;
dev->debugfs_topdir = debugfs_create_dir(dev_name(&dev->dev),
wwan_hwsim_debugfs_topdir);
debugfs_create_file("destroy", 0200, dev->debugfs_topdir, dev,
&wwan_hwsim_debugfs_devdestroy_fops);
dev->debugfs_portcreate =
debugfs_create_file("portcreate", 0200,
dev->debugfs_topdir, dev,
&wwan_hwsim_debugfs_portcreate_fops);
return dev;
err_unreg_dev:
device_unregister(&dev->dev);
/* Memory will be freed in the device release callback */
return ERR_PTR(err);
err_free_dev:
put_device(&dev->dev);
return ERR_PTR(err);
}
static void wwan_hwsim_dev_del(struct wwan_hwsim_dev *dev)
{
debugfs_remove(dev->debugfs_portcreate); /* Avoid new ports */
spin_lock(&dev->ports_lock);
while (!list_empty(&dev->ports)) {
struct wwan_hwsim_port *port;
port = list_first_entry(&dev->ports, struct wwan_hwsim_port,
list);
list_del_init(&port->list);
spin_unlock(&dev->ports_lock);
wwan_hwsim_port_del(port);
spin_lock(&dev->ports_lock);
}
spin_unlock(&dev->ports_lock);
debugfs_remove(dev->debugfs_topdir);
/* This will remove all child netdev(s) */
wwan_unregister_ops(&dev->dev);
/* Make sure that there is no pending deletion work */
if (current_work() != &dev->del_work)
cancel_work_sync(&dev->del_work);
device_unregister(&dev->dev);
/* Memory will be freed in the device release callback */
}
static void wwan_hwsim_dev_del_work(struct work_struct *work)
{
struct wwan_hwsim_dev *dev = container_of(work, typeof(*dev), del_work);
spin_lock(&wwan_hwsim_devs_lock);
if (list_empty(&dev->list)) {
/* Someone else deleting device at the moment */
spin_unlock(&wwan_hwsim_devs_lock);
return;
}
list_del_init(&dev->list);
spin_unlock(&wwan_hwsim_devs_lock);
wwan_hwsim_dev_del(dev);
}
static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
const char __user *usrbuf,
size_t count, loff_t *ppos)
{
struct wwan_hwsim_port *port = file->private_data;
	/* We cannot delete the port here since that would deadlock:
	 * debugfs_remove() waits for this callback to finish. So defer the
	 * deletion to the workqueue.
	 */
queue_work(wwan_wq, &port->del_work);
return count;
}
static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops = {
.write = wwan_hwsim_debugfs_portdestroy_write,
.open = simple_open,
.llseek = noop_llseek,
};
static ssize_t wwan_hwsim_debugfs_portcreate_write(struct file *file,
const char __user *usrbuf,
size_t count, loff_t *ppos)
{
struct wwan_hwsim_dev *dev = file->private_data;
struct wwan_hwsim_port *port;
port = wwan_hwsim_port_new(dev);
if (IS_ERR(port))
return PTR_ERR(port);
spin_lock(&dev->ports_lock);
list_add_tail(&port->list, &dev->ports);
spin_unlock(&dev->ports_lock);
return count;
}
static const struct file_operations wwan_hwsim_debugfs_portcreate_fops = {
.write = wwan_hwsim_debugfs_portcreate_write,
.open = simple_open,
.llseek = noop_llseek,
};
static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
const char __user *usrbuf,
size_t count, loff_t *ppos)
{
struct wwan_hwsim_dev *dev = file->private_data;
	/* We cannot delete the device here since that would deadlock:
	 * debugfs_remove() waits for this callback to finish. So defer the
	 * deletion to the workqueue.
	 */
queue_work(wwan_wq, &dev->del_work);
return count;
}
static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops = {
.write = wwan_hwsim_debugfs_devdestroy_write,
.open = simple_open,
.llseek = noop_llseek,
};
static ssize_t wwan_hwsim_debugfs_devcreate_write(struct file *file,
const char __user *usrbuf,
size_t count, loff_t *ppos)
{
struct wwan_hwsim_dev *dev;
dev = wwan_hwsim_dev_new();
if (IS_ERR(dev))
return PTR_ERR(dev);
spin_lock(&wwan_hwsim_devs_lock);
list_add_tail(&dev->list, &wwan_hwsim_devs);
spin_unlock(&wwan_hwsim_devs_lock);
return count;
}
static const struct file_operations wwan_hwsim_debugfs_devcreate_fops = {
.write = wwan_hwsim_debugfs_devcreate_write,
.open = simple_open,
.llseek = noop_llseek,
};
static int __init wwan_hwsim_init_devs(void)
{
struct wwan_hwsim_dev *dev;
int i, j;
for (i = 0; i < wwan_hwsim_devsnum; ++i) {
dev = wwan_hwsim_dev_new();
if (IS_ERR(dev))
return PTR_ERR(dev);
spin_lock(&wwan_hwsim_devs_lock);
list_add_tail(&dev->list, &wwan_hwsim_devs);
spin_unlock(&wwan_hwsim_devs_lock);
		/* Create a couple of ports for each device to accelerate
		 * the simulator readiness time.
		 */
for (j = 0; j < 2; ++j) {
struct wwan_hwsim_port *port;
port = wwan_hwsim_port_new(dev);
if (IS_ERR(port))
return PTR_ERR(port);
spin_lock(&dev->ports_lock);
list_add_tail(&port->list, &dev->ports);
spin_unlock(&dev->ports_lock);
}
}
return 0;
}
static void wwan_hwsim_free_devs(void)
{
struct wwan_hwsim_dev *dev;
spin_lock(&wwan_hwsim_devs_lock);
while (!list_empty(&wwan_hwsim_devs)) {
dev = list_first_entry(&wwan_hwsim_devs, struct wwan_hwsim_dev,
list);
list_del_init(&dev->list);
spin_unlock(&wwan_hwsim_devs_lock);
wwan_hwsim_dev_del(dev);
spin_lock(&wwan_hwsim_devs_lock);
}
spin_unlock(&wwan_hwsim_devs_lock);
}
static int __init wwan_hwsim_init(void)
{
int err;
if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
return -EINVAL;
wwan_wq = alloc_workqueue("wwan_wq", 0, 0);
if (!wwan_wq)
return -ENOMEM;
wwan_hwsim_class = class_create("wwan_hwsim");
if (IS_ERR(wwan_hwsim_class)) {
err = PTR_ERR(wwan_hwsim_class);
goto err_wq_destroy;
}
wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
wwan_hwsim_debugfs_devcreate =
debugfs_create_file("devcreate", 0200,
wwan_hwsim_debugfs_topdir, NULL,
&wwan_hwsim_debugfs_devcreate_fops);
err = wwan_hwsim_init_devs();
if (err)
goto err_clean_devs;
return 0;
err_clean_devs:
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
	flush_workqueue(wwan_wq); /* Wait for deletion work to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
err_wq_destroy:
destroy_workqueue(wwan_wq);
return err;
}
static void __exit wwan_hwsim_exit(void)
{
debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
wwan_hwsim_free_devs();
	flush_workqueue(wwan_wq); /* Wait for deletion work to complete */
debugfs_remove(wwan_hwsim_debugfs_topdir);
class_destroy(wwan_hwsim_class);
destroy_workqueue(wwan_wq);
}
module_init(wwan_hwsim_init);
module_exit(wwan_hwsim_exit);
MODULE_AUTHOR("Sergey Ryazanov");
MODULE_DESCRIPTION("Device simulator for WWAN framework");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/wwan/wwan_hwsim.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Stephan Gerhold <[email protected]> */
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rpmsg.h>
#include <linux/wwan.h>
struct rpmsg_wwan_dev {
/* Lower level is a rpmsg dev, upper level is a wwan port */
struct rpmsg_device *rpdev;
struct wwan_port *wwan_port;
struct rpmsg_endpoint *ept;
};
static int rpmsg_wwan_ctrl_callback(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 src)
{
struct rpmsg_wwan_dev *rpwwan = priv;
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
skb_put_data(skb, buf, len);
wwan_port_rx(rpwwan->wwan_port, skb);
return 0;
}
static int rpmsg_wwan_ctrl_start(struct wwan_port *port)
{
struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
struct rpmsg_channel_info chinfo = {
.src = rpwwan->rpdev->src,
.dst = RPMSG_ADDR_ANY,
};
strncpy(chinfo.name, rpwwan->rpdev->id.name, RPMSG_NAME_SIZE);
rpwwan->ept = rpmsg_create_ept(rpwwan->rpdev, rpmsg_wwan_ctrl_callback,
rpwwan, chinfo);
if (!rpwwan->ept)
return -EREMOTEIO;
return 0;
}
static void rpmsg_wwan_ctrl_stop(struct wwan_port *port)
{
struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
rpmsg_destroy_ept(rpwwan->ept);
rpwwan->ept = NULL;
}
static int rpmsg_wwan_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
int ret;
ret = rpmsg_trysend(rpwwan->ept, skb->data, skb->len);
if (ret)
return ret;
consume_skb(skb);
return 0;
}
static int rpmsg_wwan_ctrl_tx_blocking(struct wwan_port *port, struct sk_buff *skb)
{
struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
int ret;
ret = rpmsg_send(rpwwan->ept, skb->data, skb->len);
if (ret)
return ret;
consume_skb(skb);
return 0;
}
static __poll_t rpmsg_wwan_ctrl_tx_poll(struct wwan_port *port,
struct file *filp, poll_table *wait)
{
struct rpmsg_wwan_dev *rpwwan = wwan_port_get_drvdata(port);
return rpmsg_poll(rpwwan->ept, filp, wait);
}
static const struct wwan_port_ops rpmsg_wwan_pops = {
.start = rpmsg_wwan_ctrl_start,
.stop = rpmsg_wwan_ctrl_stop,
.tx = rpmsg_wwan_ctrl_tx,
.tx_blocking = rpmsg_wwan_ctrl_tx_blocking,
.tx_poll = rpmsg_wwan_ctrl_tx_poll,
};
static struct device *rpmsg_wwan_find_parent(struct device *dev)
{
/* Select first platform device as parent for the WWAN ports.
* On Qualcomm platforms this is usually the platform device that
* represents the modem remote processor. This might need to be
* adjusted when adding device IDs for other platforms.
*/
for (dev = dev->parent; dev; dev = dev->parent) {
if (dev_is_platform(dev))
return dev;
}
return NULL;
}
static int rpmsg_wwan_ctrl_probe(struct rpmsg_device *rpdev)
{
struct rpmsg_wwan_dev *rpwwan;
struct wwan_port *port;
struct device *parent;
parent = rpmsg_wwan_find_parent(&rpdev->dev);
if (!parent)
return -ENODEV;
rpwwan = devm_kzalloc(&rpdev->dev, sizeof(*rpwwan), GFP_KERNEL);
if (!rpwwan)
return -ENOMEM;
rpwwan->rpdev = rpdev;
dev_set_drvdata(&rpdev->dev, rpwwan);
/* Register as a wwan port, id.driver_data contains wwan port type */
port = wwan_create_port(parent, rpdev->id.driver_data,
&rpmsg_wwan_pops, NULL, rpwwan);
if (IS_ERR(port))
return PTR_ERR(port);
rpwwan->wwan_port = port;
return 0;
}
static void rpmsg_wwan_ctrl_remove(struct rpmsg_device *rpdev)
{
struct rpmsg_wwan_dev *rpwwan = dev_get_drvdata(&rpdev->dev);
wwan_remove_port(rpwwan->wwan_port);
}
static const struct rpmsg_device_id rpmsg_wwan_ctrl_id_table[] = {
/* RPMSG channels for Qualcomm SoCs with integrated modem */
{ .name = "DATA5_CNTL", .driver_data = WWAN_PORT_QMI },
{ .name = "DATA4", .driver_data = WWAN_PORT_AT },
{ .name = "DATA1", .driver_data = WWAN_PORT_AT },
{},
};
MODULE_DEVICE_TABLE(rpmsg, rpmsg_wwan_ctrl_id_table);
static struct rpmsg_driver rpmsg_wwan_ctrl_driver = {
.drv.name = "rpmsg_wwan_ctrl",
.id_table = rpmsg_wwan_ctrl_id_table,
.probe = rpmsg_wwan_ctrl_probe,
.remove = rpmsg_wwan_ctrl_remove,
};
module_rpmsg_driver(rpmsg_wwan_ctrl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("RPMSG WWAN CTRL Driver");
MODULE_AUTHOR("Stephan Gerhold <[email protected]>");
|
linux-master
|
drivers/net/wwan/rpmsg_wwan_ctrl.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm BAM-DMUX WWAN network driver
* Copyright (c) 2020, Stephan Gerhold <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>
#define BAM_DMUX_BUFFER_SIZE SZ_2K
#define BAM_DMUX_HDR_SIZE sizeof(struct bam_dmux_hdr)
#define BAM_DMUX_MAX_DATA_SIZE (BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
#define BAM_DMUX_NUM_SKB 32
#define BAM_DMUX_HDR_MAGIC 0x33fc
#define BAM_DMUX_AUTOSUSPEND_DELAY 1000
#define BAM_DMUX_REMOTE_TIMEOUT msecs_to_jiffies(2000)
enum {
BAM_DMUX_CMD_DATA,
BAM_DMUX_CMD_OPEN,
BAM_DMUX_CMD_CLOSE,
};
enum {
BAM_DMUX_CH_DATA_0,
BAM_DMUX_CH_DATA_1,
BAM_DMUX_CH_DATA_2,
BAM_DMUX_CH_DATA_3,
BAM_DMUX_CH_DATA_4,
BAM_DMUX_CH_DATA_5,
BAM_DMUX_CH_DATA_6,
BAM_DMUX_CH_DATA_7,
BAM_DMUX_NUM_CH
};
struct bam_dmux_hdr {
u16 magic;
u8 signal;
u8 cmd;
u8 pad;
u8 ch;
u16 len;
};
struct bam_dmux_skb_dma {
struct bam_dmux *dmux;
struct sk_buff *skb;
dma_addr_t addr;
};
struct bam_dmux {
struct device *dev;
int pc_irq;
bool pc_state, pc_ack_state;
struct qcom_smem_state *pc, *pc_ack;
u32 pc_mask, pc_ack_mask;
wait_queue_head_t pc_wait;
struct completion pc_ack_completion;
struct dma_chan *rx, *tx;
struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
unsigned int tx_next_skb;
atomic_long_t tx_deferred_skb;
struct work_struct tx_wakeup_work;
DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
struct work_struct register_netdev_work;
struct net_device *netdevs[BAM_DMUX_NUM_CH];
};
struct bam_dmux_netdev {
struct bam_dmux *dmux;
u8 ch;
};
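/* Power-control handshake: bam_dmux_pc_vote() sets or clears the local "pc"
 * smem bit to vote for the BAM being powered up or down, and the remote side
 * confirms via the "pc-ack" interrupt (pc_ack_completion). bam_dmux_pc_ack()
 * acknowledges a request coming from the remote side by toggling the local
 * "pc-ack" bit.
 */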
static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
{
reinit_completion(&dmux->pc_ack_completion);
qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
enable ? dmux->pc_mask : 0);
}
static void bam_dmux_pc_ack(struct bam_dmux *dmux)
{
qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
dmux->pc_ack_state = !dmux->pc_ack_state;
}
static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
enum dma_data_direction dir)
{
struct device *dev = skb_dma->dmux->dev;
skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
if (dma_mapping_error(dev, skb_dma->addr)) {
dev_err(dev, "Failed to DMA map buffer\n");
skb_dma->addr = 0;
return false;
}
return true;
}
static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
enum dma_data_direction dir)
{
dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
skb_dma->addr = 0;
}
static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
{
int i;
dev_dbg(dmux->dev, "wake queues\n");
for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
struct net_device *netdev = dmux->netdevs[i];
if (netdev && netif_running(netdev))
netif_wake_queue(netdev);
}
}
static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
{
int i;
dev_dbg(dmux->dev, "stop queues\n");
for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
struct net_device *netdev = dmux->netdevs[i];
if (netdev)
netif_stop_queue(netdev);
}
}
static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
{
struct bam_dmux *dmux = skb_dma->dmux;
unsigned long flags;
pm_runtime_mark_last_busy(dmux->dev);
pm_runtime_put_autosuspend(dmux->dev);
if (skb_dma->addr)
bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);
spin_lock_irqsave(&dmux->tx_lock, flags);
skb_dma->skb = NULL;
if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
bam_dmux_tx_wake_queues(dmux);
spin_unlock_irqrestore(&dmux->tx_lock, flags);
}
static void bam_dmux_tx_callback(void *data)
{
struct bam_dmux_skb_dma *skb_dma = data;
struct sk_buff *skb = skb_dma->skb;
bam_dmux_tx_done(skb_dma);
dev_consume_skb_any(skb);
}
static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
{
struct bam_dmux *dmux = skb_dma->dmux;
struct dma_async_tx_descriptor *desc;
desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
skb_dma->skb->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
return false;
}
desc->callback = bam_dmux_tx_callback;
desc->callback_param = skb_dma;
desc->cookie = dmaengine_submit(desc);
return true;
}
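/* Claim the next slot of the fixed-size TX ring (tx_next_skb modulo
 * BAM_DMUX_NUM_SKB). Returns NULL and stops the netdev queues if that slot is
 * still occupied by an in-flight skb; the queues are also stopped proactively
 * when the slot after the claimed one is already busy.
 */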
static struct bam_dmux_skb_dma *
bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
{
struct bam_dmux_skb_dma *skb_dma;
unsigned long flags;
spin_lock_irqsave(&dmux->tx_lock, flags);
skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
if (skb_dma->skb) {
bam_dmux_tx_stop_queues(dmux);
spin_unlock_irqrestore(&dmux->tx_lock, flags);
return NULL;
}
skb_dma->skb = skb;
dmux->tx_next_skb++;
if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
bam_dmux_tx_stop_queues(dmux);
spin_unlock_irqrestore(&dmux->tx_lock, flags);
return skb_dma;
}
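/* Send a header-only BAM-DMUX command (OPEN/CLOSE) for a channel: the command
 * skb is placed on the TX ring and submitted through the TX DMA channel.
 */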
static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
{
struct bam_dmux *dmux = bndev->dmux;
struct bam_dmux_skb_dma *skb_dma;
struct bam_dmux_hdr *hdr;
struct sk_buff *skb;
int ret;
skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
if (!skb)
return -ENOMEM;
hdr = skb_put_zero(skb, sizeof(*hdr));
hdr->magic = BAM_DMUX_HDR_MAGIC;
hdr->cmd = cmd;
hdr->ch = bndev->ch;
skb_dma = bam_dmux_tx_queue(dmux, skb);
if (!skb_dma) {
ret = -EAGAIN;
goto free_skb;
}
ret = pm_runtime_get_sync(dmux->dev);
if (ret < 0)
goto tx_fail;
if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
ret = -ENOMEM;
goto tx_fail;
}
if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
ret = -EIO;
goto tx_fail;
}
dma_async_issue_pending(dmux->tx);
return 0;
tx_fail:
bam_dmux_tx_done(skb_dma);
free_skb:
dev_kfree_skb(skb);
return ret;
}
static int bam_dmux_netdev_open(struct net_device *netdev)
{
struct bam_dmux_netdev *bndev = netdev_priv(netdev);
int ret;
ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
if (ret)
return ret;
netif_start_queue(netdev);
return 0;
}
static int bam_dmux_netdev_stop(struct net_device *netdev)
{
struct bam_dmux_netdev *bndev = netdev_priv(netdev);
netif_stop_queue(netdev);
bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
return 0;
}
static unsigned int needed_room(unsigned int avail, unsigned int needed)
{
if (avail >= needed)
return 0;
return needed - avail;
}
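/* Prepend the BAM-DMUX header and pad the payload to a 32-bit boundary,
 * expanding the skb head/tail first if there is not enough room or if the
 * skb is cloned and must not be modified in place.
 */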
static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
struct sk_buff *skb)
{
unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
unsigned int tail = needed_room(skb_tailroom(skb), pad);
struct bam_dmux_hdr *hdr;
int ret;
if (head || tail || skb_cloned(skb)) {
ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
if (ret)
return ret;
}
hdr = skb_push(skb, sizeof(*hdr));
hdr->magic = BAM_DMUX_HDR_MAGIC;
hdr->signal = 0;
hdr->cmd = BAM_DMUX_CMD_DATA;
hdr->pad = pad;
hdr->ch = bndev->ch;
hdr->len = skb->len - sizeof(*hdr);
if (pad)
skb_put_zero(skb, pad);
return 0;
}
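/* Note: pm_runtime_get() is asynchronous; a non-positive return value means
 * the device is not resumed yet, so the already-mapped skb is only recorded
 * in tx_deferred_skb and submitted later from the TX wakeup worker.
 */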
static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct bam_dmux_netdev *bndev = netdev_priv(netdev);
struct bam_dmux *dmux = bndev->dmux;
struct bam_dmux_skb_dma *skb_dma;
int active, ret;
skb_dma = bam_dmux_tx_queue(dmux, skb);
if (!skb_dma)
return NETDEV_TX_BUSY;
active = pm_runtime_get(dmux->dev);
if (active < 0 && active != -EINPROGRESS)
goto drop;
ret = bam_dmux_tx_prepare_skb(bndev, skb);
if (ret)
goto drop;
if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
goto drop;
if (active <= 0) {
/* Cannot sleep here so mark skb for wakeup handler and return */
if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
&dmux->tx_deferred_skb))
queue_pm_work(&dmux->tx_wakeup_work);
return NETDEV_TX_OK;
}
if (!bam_dmux_skb_dma_submit_tx(skb_dma))
goto drop;
dma_async_issue_pending(dmux->tx);
return NETDEV_TX_OK;
drop:
bam_dmux_tx_done(skb_dma);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static void bam_dmux_tx_wakeup_work(struct work_struct *work)
{
struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
unsigned long pending;
int ret, i;
ret = pm_runtime_resume_and_get(dmux->dev);
if (ret < 0) {
dev_err(dmux->dev, "Failed to resume: %d\n", ret);
return;
}
pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
if (!pending)
goto out;
dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
}
dma_async_issue_pending(dmux->tx);
out:
pm_runtime_mark_last_busy(dmux->dev);
pm_runtime_put_autosuspend(dmux->dev);
}
static const struct net_device_ops bam_dmux_ops = {
.ndo_open = bam_dmux_netdev_open,
.ndo_stop = bam_dmux_netdev_stop,
.ndo_start_xmit = bam_dmux_netdev_start_xmit,
};
static const struct device_type wwan_type = {
.name = "wwan",
};
static void bam_dmux_netdev_setup(struct net_device *dev)
{
dev->netdev_ops = &bam_dmux_ops;
dev->type = ARPHRD_RAWIP;
SET_NETDEV_DEVTYPE(dev, &wwan_type);
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = ETH_DATA_LEN;
dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
dev->needed_headroom = sizeof(struct bam_dmux_hdr);
dev->needed_tailroom = sizeof(u32); /* word-aligned */
dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
/* This perm addr will be used as interface identifier by IPv6 */
dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(dev->perm_addr);
}
static void bam_dmux_register_netdev_work(struct work_struct *work)
{
struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
struct bam_dmux_netdev *bndev;
struct net_device *netdev;
int ch, ret;
for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
if (dmux->netdevs[ch])
continue;
netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
bam_dmux_netdev_setup);
if (!netdev)
return;
SET_NETDEV_DEV(netdev, dmux->dev);
netdev->dev_port = ch;
bndev = netdev_priv(netdev);
bndev->dmux = dmux;
bndev->ch = ch;
ret = register_netdev(netdev);
if (ret) {
dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
ch, ret);
free_netdev(netdev);
return;
}
dmux->netdevs[ch] = netdev;
}
}
static void bam_dmux_rx_callback(void *data);
static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
{
struct bam_dmux *dmux = skb_dma->dmux;
struct dma_async_tx_descriptor *desc;
desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
skb_dma->skb->len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
return false;
}
desc->callback = bam_dmux_rx_callback;
desc->callback_param = skb_dma;
desc->cookie = dmaengine_submit(desc);
return true;
}
static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
{
if (!skb_dma->skb) {
skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
if (!skb_dma->skb)
return false;
skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
}
return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
bam_dmux_skb_dma_submit_rx(skb_dma);
}
static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
{
struct bam_dmux *dmux = skb_dma->dmux;
struct sk_buff *skb = skb_dma->skb;
struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
struct net_device *netdev = dmux->netdevs[hdr->ch];
if (!netdev || !netif_running(netdev)) {
dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
return;
}
if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
return;
}
skb_dma->skb = NULL; /* Hand over to network stack */
skb_pull(skb, sizeof(*hdr));
skb_trim(skb, hdr->len);
skb->dev = netdev;
/* Only Raw-IP/QMAP is supported by this driver */
switch (skb->data[0] & 0xf0) {
case 0x40:
skb->protocol = htons(ETH_P_IP);
break;
case 0x60:
skb->protocol = htons(ETH_P_IPV6);
break;
default:
skb->protocol = htons(ETH_P_MAP);
break;
}
netif_receive_skb(skb);
}
static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
struct net_device *netdev = dmux->netdevs[hdr->ch];
dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);
if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
return;
}
if (netdev) {
netif_device_attach(netdev);
} else {
/* Cannot sleep here, schedule work to register the netdev */
schedule_work(&dmux->register_netdev_work);
}
}
static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
struct net_device *netdev = dmux->netdevs[hdr->ch];
dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);
if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
return;
}
if (netdev)
netif_device_detach(netdev);
}
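/* RX DMA completion: validate the BAM-DMUX header, dispatch the buffer by
 * command type and then re-arm the same ring slot with a fresh buffer.
 */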
static void bam_dmux_rx_callback(void *data)
{
struct bam_dmux_skb_dma *skb_dma = data;
struct bam_dmux *dmux = skb_dma->dmux;
struct sk_buff *skb = skb_dma->skb;
struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);
if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
goto out;
}
if (hdr->ch >= BAM_DMUX_NUM_CH) {
dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
goto out;
}
switch (hdr->cmd) {
case BAM_DMUX_CMD_DATA:
bam_dmux_cmd_data(skb_dma);
break;
case BAM_DMUX_CMD_OPEN:
bam_dmux_cmd_open(dmux, hdr);
break;
case BAM_DMUX_CMD_CLOSE:
bam_dmux_cmd_close(dmux, hdr);
break;
default:
dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
hdr->cmd, hdr->ch);
break;
}
out:
if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
dma_async_issue_pending(dmux->rx);
}
static bool bam_dmux_power_on(struct bam_dmux *dmux)
{
struct device *dev = dmux->dev;
struct dma_slave_config dma_rx_conf = {
.direction = DMA_DEV_TO_MEM,
.src_maxburst = BAM_DMUX_BUFFER_SIZE,
};
int i;
dmux->rx = dma_request_chan(dev, "rx");
if (IS_ERR(dmux->rx)) {
dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
dmux->rx = NULL;
return false;
}
dmaengine_slave_config(dmux->rx, &dma_rx_conf);
for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
return false;
}
dma_async_issue_pending(dmux->rx);
return true;
}
static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
enum dma_data_direction dir)
{
int i;
for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
struct bam_dmux_skb_dma *skb_dma = &skbs[i];
if (skb_dma->addr)
bam_dmux_skb_dma_unmap(skb_dma, dir);
if (skb_dma->skb) {
dev_kfree_skb(skb_dma->skb);
skb_dma->skb = NULL;
}
}
}
static void bam_dmux_power_off(struct bam_dmux *dmux)
{
if (dmux->tx) {
dmaengine_terminate_sync(dmux->tx);
dma_release_channel(dmux->tx);
dmux->tx = NULL;
}
if (dmux->rx) {
dmaengine_terminate_sync(dmux->rx);
dma_release_channel(dmux->rx);
dmux->rx = NULL;
}
bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
}
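/* A "pc" interrupt indicates that the remote side toggled its power-control
 * request: bring the BAM channels up or down accordingly and acknowledge the
 * new state.
 */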
static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
{
struct bam_dmux *dmux = data;
bool new_state = !dmux->pc_state;
dev_dbg(dmux->dev, "pc: %u\n", new_state);
if (new_state) {
if (bam_dmux_power_on(dmux))
bam_dmux_pc_ack(dmux);
else
bam_dmux_power_off(dmux);
} else {
bam_dmux_power_off(dmux);
bam_dmux_pc_ack(dmux);
}
dmux->pc_state = new_state;
wake_up_all(&dmux->pc_wait);
return IRQ_HANDLED;
}
static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
{
struct bam_dmux *dmux = data;
dev_dbg(dmux->dev, "pc ack\n");
complete_all(&dmux->pc_ack_completion);
return IRQ_HANDLED;
}
static int bam_dmux_runtime_suspend(struct device *dev)
{
struct bam_dmux *dmux = dev_get_drvdata(dev);
dev_dbg(dev, "runtime suspend\n");
bam_dmux_pc_vote(dmux, false);
return 0;
}
static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
{
struct bam_dmux *dmux = dev_get_drvdata(dev);
dev_dbg(dev, "runtime resume\n");
/* Wait until previous power down was acked */
if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
BAM_DMUX_REMOTE_TIMEOUT))
return -ETIMEDOUT;
/* Vote for power state */
bam_dmux_pc_vote(dmux, true);
/* Wait for ack */
if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
BAM_DMUX_REMOTE_TIMEOUT)) {
bam_dmux_pc_vote(dmux, false);
return -ETIMEDOUT;
}
/* Wait until we're up */
if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
BAM_DMUX_REMOTE_TIMEOUT)) {
bam_dmux_pc_vote(dmux, false);
return -ETIMEDOUT;
}
/* Ensure that we actually initialized successfully */
if (!dmux->rx) {
bam_dmux_pc_vote(dmux, false);
return -ENXIO;
}
/* Request TX channel if necessary */
if (dmux->tx)
return 0;
dmux->tx = dma_request_chan(dev, "tx");
if (IS_ERR(dmux->tx)) {
dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
dmux->tx = NULL;
bam_dmux_runtime_suspend(dev);
return -ENXIO;
}
return 0;
}
static int bam_dmux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct bam_dmux *dmux;
int ret, pc_ack_irq, i;
unsigned int bit;
dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
if (!dmux)
return -ENOMEM;
dmux->dev = dev;
platform_set_drvdata(pdev, dmux);
dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
if (dmux->pc_irq < 0)
return dmux->pc_irq;
pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
if (pc_ack_irq < 0)
return pc_ack_irq;
dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
if (IS_ERR(dmux->pc))
return dev_err_probe(dev, PTR_ERR(dmux->pc),
"Failed to get pc state\n");
dmux->pc_mask = BIT(bit);
dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
if (IS_ERR(dmux->pc_ack))
return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
"Failed to get pc-ack state\n");
dmux->pc_ack_mask = BIT(bit);
init_waitqueue_head(&dmux->pc_wait);
init_completion(&dmux->pc_ack_completion);
complete_all(&dmux->pc_ack_completion);
spin_lock_init(&dmux->tx_lock);
INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);
for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
dmux->rx_skbs[i].dmux = dmux;
dmux->tx_skbs[i].dmux = dmux;
}
/* Runtime PM manages our own power vote.
* Note that the RX path may be active even if we are runtime suspended,
* since it is controlled by the remote side.
*/
pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
IRQF_ONESHOT, NULL, dmux);
if (ret)
return ret;
ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
IRQF_ONESHOT, NULL, dmux);
if (ret)
return ret;
ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
&dmux->pc_state);
if (ret)
return ret;
/* Check if remote finished initialization before us */
if (dmux->pc_state) {
if (bam_dmux_power_on(dmux))
bam_dmux_pc_ack(dmux);
else
bam_dmux_power_off(dmux);
}
return 0;
}
static int bam_dmux_remove(struct platform_device *pdev)
{
struct bam_dmux *dmux = platform_get_drvdata(pdev);
struct device *dev = dmux->dev;
LIST_HEAD(list);
int i;
/* Unregister network interfaces */
cancel_work_sync(&dmux->register_netdev_work);
rtnl_lock();
for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
if (dmux->netdevs[i])
unregister_netdevice_queue(dmux->netdevs[i], &list);
unregister_netdevice_many(&list);
rtnl_unlock();
cancel_work_sync(&dmux->tx_wakeup_work);
/* Drop our own power vote */
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
bam_dmux_runtime_suspend(dev);
pm_runtime_set_suspended(dev);
/* Try to wait for remote side to drop power vote */
if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
dev_err(dev, "Timed out waiting for remote side to suspend\n");
/* Make sure everything is cleaned up before we return */
disable_irq(dmux->pc_irq);
bam_dmux_power_off(dmux);
bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);
return 0;
}
static const struct dev_pm_ops bam_dmux_pm_ops = {
SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
};
static const struct of_device_id bam_dmux_of_match[] = {
{ .compatible = "qcom,bam-dmux" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bam_dmux_of_match);
static struct platform_driver bam_dmux_driver = {
.probe = bam_dmux_probe,
.remove = bam_dmux_remove,
.driver = {
.name = "bam-dmux",
.pm = &bam_dmux_pm_ops,
.of_match_table = bam_dmux_of_match,
},
};
module_platform_driver(bam_dmux_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
MODULE_AUTHOR("Stephan Gerhold <[email protected]>");
|
linux-master
|
drivers/net/wwan/qcom_bam_dmux.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <[email protected]> */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
#include <uapi/linux/wwan.h>
/* Maximum number of minors in use */
#define WWAN_MAX_MINORS (1 << MINORBITS)
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;
static struct dentry *wwan_debugfs_dir;
#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)
/* WWAN port flags */
#define WWAN_PORT_TX_OFF 0
/**
* struct wwan_device - The structure that defines a WWAN device
*
* @id: WWAN device unique ID.
* @dev: Underlying device.
* @port_id: Current available port ID to pick.
* @ops: wwan device ops
* @ops_ctxt: context to pass to ops
* @debugfs_dir: WWAN device debugfs dir
*/
struct wwan_device {
unsigned int id;
struct device dev;
atomic_t port_id;
const struct wwan_ops *ops;
void *ops_ctxt;
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *debugfs_dir;
#endif
};
/**
* struct wwan_port - The structure that defines a WWAN port
* @type: Port type
* @start_count: Port start counter
* @flags: Store port state and capabilities
* @ops: Pointer to WWAN port operations
* @ops_lock: Protect port ops
* @dev: Underlying device
* @rxq: Buffer inbound queue
* @waitqueue: The waitqueue for port fops (read/write/poll)
* @data_lock: Port specific data access serialization
* @headroom_len: SKB reserved headroom size
* @frag_len: Length to fragment packet
* @at_data: AT port specific data
*/
struct wwan_port {
enum wwan_port_type type;
unsigned int start_count;
unsigned long flags;
const struct wwan_port_ops *ops;
struct mutex ops_lock; /* Serialize ops + protect against removal */
struct device dev;
struct sk_buff_head rxq;
wait_queue_head_t waitqueue;
struct mutex data_lock; /* Port specific data access serialization */
size_t headroom_len;
size_t frag_len;
union {
struct {
struct ktermios termios;
int mdmbits;
} at_data;
};
};
static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct wwan_device *wwan = to_wwan_dev(dev);
return sprintf(buf, "%d\n", wwan->id);
}
static DEVICE_ATTR_RO(index);
static struct attribute *wwan_dev_attrs[] = {
&dev_attr_index.attr,
NULL,
};
ATTRIBUTE_GROUPS(wwan_dev);
static void wwan_dev_destroy(struct device *dev)
{
struct wwan_device *wwandev = to_wwan_dev(dev);
ida_free(&wwan_dev_ids, wwandev->id);
kfree(wwandev);
}
static const struct device_type wwan_dev_type = {
.name = "wwan_dev",
.release = wwan_dev_destroy,
.groups = wwan_dev_groups,
};
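/* A wwan_device matches if the given device is either its parent or the
 * wwan_device itself.
 */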
static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
return (dev->type == &wwan_dev_type &&
(dev->parent == parent || dev == parent));
}
static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
struct device *dev;
dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
if (!dev)
return ERR_PTR(-ENODEV);
return to_wwan_dev(dev);
}
static int wwan_dev_name_match(struct device *dev, const void *name)
{
return dev->type == &wwan_dev_type &&
strcmp(dev_name(dev), name) == 0;
}
static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
struct device *dev;
dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
if (!dev)
return ERR_PTR(-ENODEV);
return to_wwan_dev(dev);
}
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *wwan_get_debugfs_dir(struct device *parent)
{
struct wwan_device *wwandev;
wwandev = wwan_dev_get_by_parent(parent);
if (IS_ERR(wwandev))
return ERR_CAST(wwandev);
return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
{
struct wwan_device *wwandev;
if (dev->type != &wwan_dev_type)
return 0;
wwandev = to_wwan_dev(dev);
return wwandev->debugfs_dir == dir;
}
static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
struct device *dev;
dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
if (!dev)
return ERR_PTR(-ENODEV);
return to_wwan_dev(dev);
}
void wwan_put_debugfs_dir(struct dentry *dir)
{
struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
if (WARN_ON(IS_ERR(wwandev)))
return;
/* wwan_dev_get_by_debugfs() also got a reference */
put_device(&wwandev->dev);
put_device(&wwandev->dev);
}
EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
/* This function allocates and registers a new WWAN device OR if a WWAN device
* already exists for the given parent, it gets a reference and returns it.
* This function is not exported (for now), it is called indirectly via
* wwan_create_port().
*/
static struct wwan_device *wwan_create_dev(struct device *parent)
{
struct wwan_device *wwandev;
int err, id;
/* The 'find-alloc-register' operation must be protected against
* concurrent execution, a WWAN device is possibly shared between
* multiple callers or concurrently unregistered from wwan_remove_dev().
*/
mutex_lock(&wwan_register_lock);
/* If wwandev already exists, return it */
wwandev = wwan_dev_get_by_parent(parent);
if (!IS_ERR(wwandev))
goto done_unlock;
id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
if (id < 0) {
wwandev = ERR_PTR(id);
goto done_unlock;
}
wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
if (!wwandev) {
wwandev = ERR_PTR(-ENOMEM);
ida_free(&wwan_dev_ids, id);
goto done_unlock;
}
wwandev->dev.parent = parent;
wwandev->dev.class = wwan_class;
wwandev->dev.type = &wwan_dev_type;
wwandev->id = id;
dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);
err = device_register(&wwandev->dev);
if (err) {
put_device(&wwandev->dev);
wwandev = ERR_PTR(err);
goto done_unlock;
}
#ifdef CONFIG_WWAN_DEBUGFS
wwandev->debugfs_dir =
debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
wwan_debugfs_dir);
#endif
done_unlock:
mutex_unlock(&wwan_register_lock);
return wwandev;
}
static int is_wwan_child(struct device *dev, void *data)
{
return dev->class == wwan_class;
}
static void wwan_remove_dev(struct wwan_device *wwandev)
{
int ret;
/* Prevent concurrent picking from wwan_create_dev */
mutex_lock(&wwan_register_lock);
/* WWAN device is created and registered (get+add) along with its first
* child port, and subsequent port registrations only grab a reference
* (get). The WWAN device must then be unregistered (del+put) along with
* its last port, and the reference is simply dropped (put) otherwise. In the
* same fashion, we must not unregister it when the ops are still there.
*/
if (wwandev->ops)
ret = 1;
else
ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
if (!ret) {
#ifdef CONFIG_WWAN_DEBUGFS
debugfs_remove_recursive(wwandev->debugfs_dir);
#endif
device_unregister(&wwandev->dev);
} else {
put_device(&wwandev->dev);
}
mutex_unlock(&wwan_register_lock);
}
/* ------- WWAN port management ------- */
static const struct {
const char * const name; /* Port type name */
const char * const devsuf; /* Port device name suffix */
} wwan_port_types[WWAN_PORT_MAX + 1] = {
[WWAN_PORT_AT] = {
.name = "AT",
.devsuf = "at",
},
[WWAN_PORT_MBIM] = {
.name = "MBIM",
.devsuf = "mbim",
},
[WWAN_PORT_QMI] = {
.name = "QMI",
.devsuf = "qmi",
},
[WWAN_PORT_QCDM] = {
.name = "QCDM",
.devsuf = "qcdm",
},
[WWAN_PORT_FIREHOSE] = {
.name = "FIREHOSE",
.devsuf = "firehose",
},
[WWAN_PORT_XMMRPC] = {
.name = "XMMRPC",
.devsuf = "xmmrpc",
},
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct wwan_port *port = to_wwan_port(dev);
return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
}
static DEVICE_ATTR_RO(type);
static struct attribute *wwan_port_attrs[] = {
&dev_attr_type.attr,
NULL,
};
ATTRIBUTE_GROUPS(wwan_port);
static void wwan_port_destroy(struct device *dev)
{
struct wwan_port *port = to_wwan_port(dev);
ida_free(&minors, MINOR(port->dev.devt));
mutex_destroy(&port->data_lock);
mutex_destroy(&port->ops_lock);
kfree(port);
}
static const struct device_type wwan_port_dev_type = {
.name = "wwan_port",
.release = wwan_port_destroy,
.groups = wwan_port_groups,
};
static int wwan_port_minor_match(struct device *dev, const void *minor)
{
return (dev->type == &wwan_port_dev_type &&
MINOR(dev->devt) == *(unsigned int *)minor);
}
static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
struct device *dev;
dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
if (!dev)
return ERR_PTR(-ENODEV);
return to_wwan_port(dev);
}
/* Allocate and set a unique name based on the passed format
*
* Name allocation approach is highly inspired by the __dev_alloc_name()
* function.
*
* To avoid name collisions, the caller must prevent the new port device
* registration as well as concurrent invocation of this function.
*/
static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
{
struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
const unsigned int max_ports = PAGE_SIZE * 8;
struct class_dev_iter iter;
unsigned long *idmap;
struct device *dev;
char buf[0x20];
int id;
idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
if (!idmap)
return -ENOMEM;
/* Collect ids of same name format ports */
class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
while ((dev = class_dev_iter_next(&iter))) {
if (dev->parent != &wwandev->dev)
continue;
if (sscanf(dev_name(dev), fmt, &id) != 1)
continue;
if (id < 0 || id >= max_ports)
continue;
set_bit(id, idmap);
}
class_dev_iter_exit(&iter);
/* Allocate unique id */
id = find_first_zero_bit(idmap, max_ports);
free_page((unsigned long)idmap);
snprintf(buf, sizeof(buf), fmt, id); /* Name generation */
dev = device_find_child_by_name(&wwandev->dev, buf);
if (dev) {
put_device(dev);
return -ENFILE;
}
return dev_set_name(&port->dev, buf);
}
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
struct wwan_port_caps *caps,
void *drvdata)
{
struct wwan_device *wwandev;
struct wwan_port *port;
char namefmt[0x20];
int minor, err;
if (type > WWAN_PORT_MAX || !ops)
return ERR_PTR(-EINVAL);
/* A port is always a child of a WWAN device, retrieve (allocate or
* pick) the WWAN device based on the provided parent device.
*/
wwandev = wwan_create_dev(parent);
if (IS_ERR(wwandev))
return ERR_CAST(wwandev);
/* A port is exposed as a character device, get a minor */
minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
if (minor < 0) {
err = minor;
goto error_wwandev_remove;
}
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port) {
err = -ENOMEM;
ida_free(&minors, minor);
goto error_wwandev_remove;
}
port->type = type;
port->ops = ops;
port->frag_len = caps ? caps->frag_len : SIZE_MAX;
port->headroom_len = caps ? caps->headroom_len : 0;
mutex_init(&port->ops_lock);
skb_queue_head_init(&port->rxq);
init_waitqueue_head(&port->waitqueue);
mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
port->dev.class = wwan_class;
port->dev.type = &wwan_port_dev_type;
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
/* allocate unique name based on wwan device id, port type and number */
snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
wwan_port_types[port->type].devsuf);
/* Serialize ports registration */
mutex_lock(&wwan_register_lock);
__wwan_port_dev_assign_name(port, namefmt);
err = device_register(&port->dev);
mutex_unlock(&wwan_register_lock);
if (err)
goto error_put_device;
dev_info(&wwandev->dev, "port %s attached\n", dev_name(&port->dev));
return port;
error_put_device:
put_device(&port->dev);
error_wwandev_remove:
wwan_remove_dev(wwandev);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);
void wwan_remove_port(struct wwan_port *port)
{
struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
mutex_lock(&port->ops_lock);
if (port->start_count)
port->ops->stop(port);
port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
mutex_unlock(&port->ops_lock);
wake_up_interruptible(&port->waitqueue);
skb_queue_purge(&port->rxq);
dev_set_drvdata(&port->dev, NULL);
dev_info(&wwandev->dev, "port %s disconnected\n", dev_name(&port->dev));
device_unregister(&port->dev);
/* Release related wwan device */
wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);
void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
skb_queue_tail(&port->rxq, skb);
wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);
void wwan_port_txon(struct wwan_port *port)
{
clear_bit(WWAN_PORT_TX_OFF, &port->flags);
wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);
void wwan_port_txoff(struct wwan_port *port)
{
set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);
void *wwan_port_get_drvdata(struct wwan_port *port)
{
return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);
static int wwan_port_op_start(struct wwan_port *port)
{
int ret = 0;
mutex_lock(&port->ops_lock);
if (!port->ops) { /* Port got unplugged */
ret = -ENODEV;
goto out_unlock;
}
/* If port is already started, don't start again */
if (!port->start_count)
ret = port->ops->start(port);
if (!ret)
port->start_count++;
out_unlock:
mutex_unlock(&port->ops_lock);
return ret;
}
static void wwan_port_op_stop(struct wwan_port *port)
{
mutex_lock(&port->ops_lock);
port->start_count--;
if (!port->start_count) {
if (port->ops)
port->ops->stop(port);
skb_queue_purge(&port->rxq);
}
mutex_unlock(&port->ops_lock);
}
static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
bool nonblock)
{
int ret;
mutex_lock(&port->ops_lock);
if (!port->ops) { /* Port got unplugged */
ret = -ENODEV;
goto out_unlock;
}
if (nonblock || !port->ops->tx_blocking)
ret = port->ops->tx(port, skb);
else
ret = port->ops->tx_blocking(port, skb);
out_unlock:
mutex_unlock(&port->ops_lock);
return ret;
}
static bool is_read_blocked(struct wwan_port *port)
{
return skb_queue_empty(&port->rxq) && port->ops;
}
static bool is_write_blocked(struct wwan_port *port)
{
return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}
static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
if (!is_read_blocked(port))
return 0;
if (nonblock)
return -EAGAIN;
if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
return -ERESTARTSYS;
return 0;
}
static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
if (!is_write_blocked(port))
return 0;
if (nonblock)
return -EAGAIN;
if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
return -ERESTARTSYS;
return 0;
}
static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
struct wwan_port *port;
int err = 0;
port = wwan_port_get_by_minor(iminor(inode));
if (IS_ERR(port))
return PTR_ERR(port);
file->private_data = port;
stream_open(inode, file);
err = wwan_port_op_start(port);
if (err)
put_device(&port->dev);
return err;
}
static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
struct wwan_port *port = filp->private_data;
wwan_port_op_stop(port);
put_device(&port->dev);
return 0;
}
static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
struct wwan_port *port = filp->private_data;
struct sk_buff *skb;
size_t copied;
int ret;
ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
if (ret)
return ret;
skb = skb_dequeue(&port->rxq);
if (!skb)
return -EIO;
copied = min_t(size_t, count, skb->len);
if (copy_to_user(buf, skb->data, copied)) {
kfree_skb(skb);
return -EFAULT;
}
skb_pull(skb, copied);
/* skb is not fully consumed, keep it in the queue */
if (skb->len)
skb_queue_head(&port->rxq, skb);
else
consume_skb(skb);
return copied;
}
static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
struct sk_buff *skb, *head = NULL, *tail = NULL;
struct wwan_port *port = filp->private_data;
size_t frag_len, remain = count;
int ret;
ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
if (ret)
return ret;
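/* Copy the user buffer into one or more skbs of at most frag_len bytes each;
 * additional fragments are chained to the head skb via frag_list so the port
 * driver receives a single (possibly fragmented) skb.
 */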
do {
frag_len = min(remain, port->frag_len);
skb = alloc_skb(frag_len + port->headroom_len, GFP_KERNEL);
if (!skb) {
ret = -ENOMEM;
goto freeskb;
}
skb_reserve(skb, port->headroom_len);
if (!head) {
head = skb;
} else if (!tail) {
skb_shinfo(head)->frag_list = skb;
tail = skb;
} else {
tail->next = skb;
tail = skb;
}
if (copy_from_user(skb_put(skb, frag_len), buf + count - remain, frag_len)) {
ret = -EFAULT;
goto freeskb;
}
if (skb != head) {
head->data_len += skb->len;
head->len += skb->len;
head->truesize += skb->truesize;
}
} while (remain -= frag_len);
ret = wwan_port_op_tx(port, head, !!(filp->f_flags & O_NONBLOCK));
if (!ret)
return count;
freeskb:
kfree_skb(head);
return ret;
}
static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
struct wwan_port *port = filp->private_data;
__poll_t mask = 0;
poll_wait(filp, &port->waitqueue, wait);
mutex_lock(&port->ops_lock);
if (port->ops && port->ops->tx_poll)
mask |= port->ops->tx_poll(port, filp, wait);
else if (!is_write_blocked(port))
mask |= EPOLLOUT | EPOLLWRNORM;
if (!is_read_blocked(port))
mask |= EPOLLIN | EPOLLRDNORM;
if (!port->ops)
mask |= EPOLLHUP | EPOLLERR;
mutex_unlock(&port->ops_lock);
return mask;
}
/* Implements minimalistic stub terminal IOCTLs support */
static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
mutex_lock(&port->data_lock);
switch (cmd) {
case TCFLSH:
break;
case TCGETS:
if (copy_to_user((void __user *)arg, &port->at_data.termios,
sizeof(struct termios)))
ret = -EFAULT;
break;
case TCSETS:
case TCSETSW:
case TCSETSF:
if (copy_from_user(&port->at_data.termios, (void __user *)arg,
sizeof(struct termios)))
ret = -EFAULT;
break;
#ifdef TCGETS2
case TCGETS2:
if (copy_to_user((void __user *)arg, &port->at_data.termios,
sizeof(struct termios2)))
ret = -EFAULT;
break;
case TCSETS2:
case TCSETSW2:
case TCSETSF2:
if (copy_from_user(&port->at_data.termios, (void __user *)arg,
sizeof(struct termios2)))
ret = -EFAULT;
break;
#endif
case TIOCMGET:
ret = put_user(port->at_data.mdmbits, (int __user *)arg);
break;
case TIOCMSET:
case TIOCMBIC:
case TIOCMBIS: {
int mdmbits;
if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
ret = -EFAULT;
break;
}
if (cmd == TIOCMBIC)
port->at_data.mdmbits &= ~mdmbits;
else if (cmd == TIOCMBIS)
port->at_data.mdmbits |= mdmbits;
else
port->at_data.mdmbits = mdmbits;
break;
}
default:
ret = -ENOIOCTLCMD;
}
mutex_unlock(&port->data_lock);
return ret;
}
static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct wwan_port *port = filp->private_data;
int res;
if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
res = wwan_port_fops_at_ioctl(port, cmd, arg);
if (res != -ENOIOCTLCMD)
return res;
}
switch (cmd) {
case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */
unsigned long flags;
struct sk_buff *skb;
int amount = 0;
spin_lock_irqsave(&port->rxq.lock, flags);
skb_queue_walk(&port->rxq, skb)
amount += skb->len;
spin_unlock_irqrestore(&port->rxq.lock, flags);
return put_user(amount, (int __user *)arg);
}
default:
return -ENOIOCTLCMD;
}
}
static const struct file_operations wwan_port_fops = {
.owner = THIS_MODULE,
.open = wwan_port_fops_open,
.release = wwan_port_fops_release,
.read = wwan_port_fops_read,
.write = wwan_port_fops_write,
.poll = wwan_port_fops_poll,
.unlocked_ioctl = wwan_port_fops_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_ptr_ioctl,
#endif
.llseek = noop_llseek,
};
static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
if (!data)
return -EINVAL;
if (!tb[IFLA_PARENT_DEV_NAME])
return -EINVAL;
if (!data[IFLA_WWAN_LINK_ID])
return -EINVAL;
return 0;
}
static struct device_type wwan_type = { .name = "wwan" };
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
const char *ifname,
unsigned char name_assign_type,
unsigned int num_tx_queues,
unsigned int num_rx_queues)
{
const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
struct net_device *dev;
unsigned int priv_size;
if (IS_ERR(wwandev))
return ERR_CAST(wwandev);
/* only supported if ops were registered (not just ports) */
if (!wwandev->ops) {
dev = ERR_PTR(-EOPNOTSUPP);
goto out;
}
priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
wwandev->ops->setup, num_tx_queues, num_rx_queues);
if (dev) {
SET_NETDEV_DEV(dev, &wwandev->dev);
SET_NETDEV_DEVTYPE(dev, &wwan_type);
}
out:
/* release the reference */
put_device(&wwandev->dev);
return dev;
}
static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
struct wwan_netdev_priv *priv = netdev_priv(dev);
int ret;
if (IS_ERR(wwandev))
return PTR_ERR(wwandev);
/* shouldn't have a netdev (left) with us as parent so WARN */
if (WARN_ON(!wwandev->ops)) {
ret = -EOPNOTSUPP;
goto out;
}
priv->link_id = link_id;
if (wwandev->ops->newlink)
ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
link_id, extack);
else
ret = register_netdevice(dev);
out:
/* release the reference */
put_device(&wwandev->dev);
return ret;
}
static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
{
struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
if (IS_ERR(wwandev))
return;
/* shouldn't have a netdev (left) with us as parent so WARN */
if (WARN_ON(!wwandev->ops))
goto out;
if (wwandev->ops->dellink)
wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
else
unregister_netdevice_queue(dev, head);
out:
/* release the reference */
put_device(&wwandev->dev);
}
static size_t wwan_rtnl_get_size(const struct net_device *dev)
{
return
nla_total_size(4) + /* IFLA_WWAN_LINK_ID */
0;
}
static int wwan_rtnl_fill_info(struct sk_buff *skb,
const struct net_device *dev)
{
struct wwan_netdev_priv *priv = netdev_priv(dev);
if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
};
static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
.kind = "wwan",
.maxtype = __IFLA_WWAN_MAX,
.alloc = wwan_rtnl_alloc,
.validate = wwan_rtnl_validate,
.newlink = wwan_rtnl_newlink,
.dellink = wwan_rtnl_dellink,
.get_size = wwan_rtnl_get_size,
.fill_info = wwan_rtnl_fill_info,
.policy = wwan_rtnl_policy,
};
static void wwan_create_default_link(struct wwan_device *wwandev,
u32 def_link_id)
{
struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
struct nlattr *data[IFLA_WWAN_MAX + 1];
struct net_device *dev;
struct nlmsghdr *nlh;
struct sk_buff *msg;
/* Forge attributes required to create a WWAN netdev. We first
* build a netlink message and then parse it. This looks
* odd, but such an approach is less error-prone.
*/
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (WARN_ON(!msg))
return;
nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
if (WARN_ON(!nlh))
goto free_attrs;
if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
goto free_attrs;
tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
if (!tb[IFLA_LINKINFO])
goto free_attrs;
linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
if (!linkinfo[IFLA_INFO_DATA])
goto free_attrs;
if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
goto free_attrs;
nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
nla_nest_end(msg, tb[IFLA_LINKINFO]);
nlmsg_end(msg, nlh);
/* The next three parsing calls cannot fail */
nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
NULL, NULL);
nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
linkinfo[IFLA_INFO_DATA], NULL, NULL);
rtnl_lock();
dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
&wwan_rtnl_link_ops, tb, NULL);
if (WARN_ON(IS_ERR(dev)))
goto unlock;
if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
free_netdev(dev);
goto unlock;
}
rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */
unlock:
rtnl_unlock();
free_attrs:
nlmsg_free(msg);
}
/**
* wwan_register_ops - register WWAN device ops
* @parent: Device to use as parent and shared by all WWAN ports and
* created netdevs
* @ops: operations to register
* @ctxt: context to pass to operations
* @def_link_id: id of the default link that will be automatically created by
* the WWAN core for the WWAN device. The default link will not be created
* if the passed value is WWAN_NO_DEFAULT_LINK.
*
* Returns: 0 on success, a negative error code on failure
*/
int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
void *ctxt, u32 def_link_id)
{
struct wwan_device *wwandev;
if (WARN_ON(!parent || !ops || !ops->setup))
return -EINVAL;
wwandev = wwan_create_dev(parent);
if (IS_ERR(wwandev))
return PTR_ERR(wwandev);
if (WARN_ON(wwandev->ops)) {
wwan_remove_dev(wwandev);
return -EBUSY;
}
wwandev->ops = ops;
wwandev->ops_ctxt = ctxt;
/* NB: we do not abort ops registration in case of default link
* creation failure. Link ops is the management interface, while the
* default link creation is a service option. And we should not prevent
* a user from manually creating a link later if the service option failed
* now.
*/
if (def_link_id != WWAN_NO_DEFAULT_LINK)
wwan_create_default_link(wwandev, def_link_id);
return 0;
}
EXPORT_SYMBOL_GPL(wwan_register_ops);
/* Enqueue child netdev deletion */
static int wwan_child_dellink(struct device *dev, void *data)
{
struct list_head *kill_list = data;
if (dev->type == &wwan_type)
wwan_rtnl_dellink(to_net_dev(dev), kill_list);
return 0;
}
/**
* wwan_unregister_ops - remove WWAN device ops
* @parent: Device to use as parent and shared by all WWAN ports and
* created netdevs
*/
void wwan_unregister_ops(struct device *parent)
{
struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
LIST_HEAD(kill_list);
if (WARN_ON(IS_ERR(wwandev)))
return;
if (WARN_ON(!wwandev->ops)) {
put_device(&wwandev->dev);
return;
}
/* put the reference obtained by wwan_dev_get_by_parent(),
* we should still have one (that the owner is giving back
* now) due to the ops being assigned.
*/
put_device(&wwandev->dev);
rtnl_lock(); /* Prevent concurrent netdev(s) creation/destruction */
/* Remove all child netdev(s), using batch removing */
device_for_each_child(&wwandev->dev, &kill_list,
wwan_child_dellink);
unregister_netdevice_many(&kill_list);
wwandev->ops = NULL; /* Finally remove ops */
rtnl_unlock();
wwandev->ops_ctxt = NULL;
wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_unregister_ops);
static int __init wwan_init(void)
{
int err;
err = rtnl_link_register(&wwan_rtnl_link_ops);
if (err)
return err;
wwan_class = class_create("wwan");
if (IS_ERR(wwan_class)) {
err = PTR_ERR(wwan_class);
goto unregister;
}
/* chrdev used for wwan ports */
wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
&wwan_port_fops);
if (wwan_major < 0) {
err = wwan_major;
goto destroy;
}
#ifdef CONFIG_WWAN_DEBUGFS
wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
#endif
return 0;
destroy:
class_destroy(wwan_class);
unregister:
rtnl_link_unregister(&wwan_rtnl_link_ops);
return err;
}
static void __exit wwan_exit(void)
{
debugfs_remove_recursive(wwan_debugfs_dir);
__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
rtnl_link_unregister(&wwan_rtnl_link_ops);
class_destroy(wwan_class);
}
module_init(wwan_init);
module_exit(wwan_exit);
MODULE_AUTHOR("Loic Poulain <[email protected]>");
MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wwan/wwan_core.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI MBIM Network driver - Network/MBIM over MHI bus
*
* Copyright (C) 2021 Linaro Ltd <[email protected]>
*
* This driver copies some code from cdc_ncm, which is:
* Copyright (C) ST-Ericsson 2010-2012
* and cdc_mbim, which is:
* Copyright (c) 2012 Smith Micro Software, Inc.
* Copyright (c) 2012 Bjørn Mork <[email protected]>
*
*/
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/mhi.h>
#include <linux/mii.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc_ncm.h>
#include <linux/wwan.h>
/* 3500 allows us to optimize skb allocation; the skbs will basically fit in
* one 4K page. Large MBIM packets will simply be split over several MHI
* transfers and chained by the MHI net layer (zerocopy).
*/
#define MHI_DEFAULT_MRU 3500
#define MHI_MBIM_DEFAULT_MTU 1500
#define MHI_MAX_BUF_SZ 0xffff
#define MBIM_NDP16_SIGN_MASK 0x00ffffff
#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
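/* Links are kept in a small hash table of RCU-protected lists, indexed by the
 * MBIM session id modulo MHI_MBIM_LINK_HASH_SIZE.
 */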
struct mhi_mbim_link {
struct mhi_mbim_context *mbim;
struct net_device *ndev;
unsigned int session;
/* stats */
u64_stats_t rx_packets;
u64_stats_t rx_bytes;
u64_stats_t rx_errors;
u64_stats_t tx_packets;
u64_stats_t tx_bytes;
u64_stats_t tx_errors;
u64_stats_t tx_dropped;
struct u64_stats_sync tx_syncp;
struct u64_stats_sync rx_syncp;
struct hlist_node hlnode;
};
struct mhi_mbim_context {
struct mhi_device *mdev;
struct sk_buff *skbagg_head;
struct sk_buff *skbagg_tail;
unsigned int mru;
u32 rx_queue_sz;
u16 rx_seq;
u16 tx_seq;
struct delayed_work rx_refill;
spinlock_t tx_lock;
struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
};
struct mbim_tx_hdr {
struct usb_cdc_ncm_nth16 nth16;
struct usb_cdc_ncm_ndp16 ndp16;
struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
unsigned int session)
{
struct mhi_mbim_link *link;
hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
if (link->session == session)
return link;
}
return NULL;
}
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
unsigned int dgram_size = skb->len;
struct usb_cdc_ncm_nth16 *nth16;
struct usb_cdc_ncm_ndp16 *ndp16;
struct mbim_tx_hdr *mbim_hdr;
/* Only one NDP is sent, containing the IP packet (no aggregation) */
/* Ensure we have enough headroom for crafting MBIM header */
if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
dev_kfree_skb_any(skb);
return NULL;
}
mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));
/* Fill NTB header */
nth16 = &mbim_hdr->nth16;
nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
nth16->wSequence = cpu_to_le16(tx_seq);
nth16->wBlockLength = cpu_to_le16(skb->len);
nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
/* Fill the unique NDP */
ndp16 = &mbim_hdr->ndp16;
ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
+ sizeof(struct usb_cdc_ncm_dpe16) * 2);
ndp16->wNextNdpIndex = 0;
/* Datagram follows the mbim header */
ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);
/* null termination */
ndp16->dpe16[1].wDatagramIndex = 0;
ndp16->dpe16[1].wDatagramLength = 0;
return skb;
}
static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
struct mhi_mbim_context *mbim = link->mbim;
unsigned long flags;
int err = -ENOMEM;
/* Serialize MHI channel queuing and MBIM seq */
spin_lock_irqsave(&mbim->tx_lock, flags);
skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
if (unlikely(!skb))
goto exit_unlock;
err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
netif_stop_queue(ndev);
if (!err)
mbim->tx_seq++;
exit_unlock:
spin_unlock_irqrestore(&mbim->tx_lock, flags);
if (unlikely(err)) {
net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
ndev->name, err);
dev_kfree_skb_any(skb);
goto exit_drop;
}
return NETDEV_TX_OK;
exit_drop:
u64_stats_update_begin(&link->tx_syncp);
u64_stats_inc(&link->tx_dropped);
u64_stats_update_end(&link->tx_syncp);
return NETDEV_TX_OK;
}
static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
struct usb_cdc_ncm_nth16 *nth16;
int len;
if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
sizeof(struct usb_cdc_ncm_ndp16)) {
net_err_ratelimited("frame too short\n");
return -EINVAL;
}
nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;
if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
le32_to_cpu(nth16->dwSignature));
return -EINVAL;
}
/* No limit on the block length, except the size of the data pkt */
len = le16_to_cpu(nth16->wBlockLength);
if (len > skb->len) {
net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
len, skb->len);
return -EINVAL;
}
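/* Warn about sequence number discontinuities, but tolerate a stream that
 * (re)starts at zero as well as the 16-bit wrap from 0xffff back to 0.
 */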
if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
(mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
!(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
mbim->rx_seq, le16_to_cpu(nth16->wSequence));
}
mbim->rx_seq = le16_to_cpu(nth16->wSequence);
return le16_to_cpu(nth16->wNdpIndex);
}
static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
int ret;
if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
net_err_ratelimited("invalid DPT16 length <%u>\n",
le16_to_cpu(ndp16->wLength));
return -EINVAL;
}
ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
/ sizeof(struct usb_cdc_ncm_dpe16));
ret--; /* Last entry is always a NULL terminator */
if (sizeof(struct usb_cdc_ncm_ndp16) +
ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
net_err_ratelimited("Invalid nframes = %d\n", ret);
return -EINVAL;
}
return ret;
}
static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
int ndpoffset;
/* Check NTB header and retrieve first NDP offset */
ndpoffset = mbim_rx_verify_nth16(mbim, skb);
if (ndpoffset < 0) {
net_err_ratelimited("mbim: Incorrect NTB header\n");
goto error;
}
/* Process each NDP */
while (1) {
struct usb_cdc_ncm_ndp16 ndp16;
struct usb_cdc_ncm_dpe16 dpe16;
struct mhi_mbim_link *link;
int nframes, n, dpeoffset;
unsigned int session;
if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
ndpoffset);
goto error;
}
/* Check NDP header and retrieve number of datagrams */
nframes = mbim_rx_verify_ndp16(skb, &ndp16);
if (nframes < 0) {
net_err_ratelimited("mbim: Incorrect NDP16\n");
goto error;
}
/* Only IP data type supported, no DSS in MHI context */
if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
!= cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
net_err_ratelimited("mbim: Unsupported NDP type\n");
goto next_ndp;
}
session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;
rcu_read_lock();
link = mhi_mbim_get_link_rcu(mbim, session);
if (!link) {
net_err_ratelimited("mbim: bad packet session (%u)\n", session);
goto unlock;
}
/* de-aggregate and deliver IP packets */
dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
u16 dgram_offset, dgram_len;
struct sk_buff *skbn;
if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
break;
dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
dgram_len = le16_to_cpu(dpe16.wDatagramLength);
if (!dgram_offset || !dgram_len)
break; /* null terminator */
skbn = netdev_alloc_skb(link->ndev, dgram_len);
if (!skbn)
continue;
skb_put(skbn, dgram_len);
skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);
switch (skbn->data[0] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);
break;
case 0x60:
skbn->protocol = htons(ETH_P_IPV6);
break;
default:
net_err_ratelimited("%s: unknown protocol\n",
link->ndev->name);
dev_kfree_skb_any(skbn);
u64_stats_update_begin(&link->rx_syncp);
u64_stats_inc(&link->rx_errors);
u64_stats_update_end(&link->rx_syncp);
continue;
}
u64_stats_update_begin(&link->rx_syncp);
u64_stats_inc(&link->rx_packets);
u64_stats_add(&link->rx_bytes, skbn->len);
u64_stats_update_end(&link->rx_syncp);
netif_rx(skbn);
}
unlock:
rcu_read_unlock();
next_ndp:
/* Other NDP to process? */
ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
if (!ndpoffset)
break;
}
/* free skb */
dev_consume_skb_any(skb);
return;
error:
dev_kfree_skb_any(skb);
}
static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
struct sk_buff *skb)
{
struct sk_buff *head = mbim->skbagg_head;
struct sk_buff *tail = mbim->skbagg_tail;
/* This is non-paged skb chaining using frag_list */
if (!head) {
mbim->skbagg_head = skb;
return skb;
}
if (!skb_shinfo(head)->frag_list)
skb_shinfo(head)->frag_list = skb;
else
tail->next = skb;
head->len += skb->len;
head->data_len += skb->len;
head->truesize += skb->truesize;
mbim->skbagg_tail = skb;
return mbim->skbagg_head;
}
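/* For orientation: aggregation here is plain frag_list chaining. The first
 * fragment becomes the head; the second is attached as the head's frag_list
 * and later ones via the previous tail's ->next, while the head's len,
 * data_len and truesize are grown so the assembled skb describes the whole
 * NTB once the final, non-overflowing transfer arrives.
 */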
static void mhi_net_rx_refill_work(struct work_struct *work)
{
struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
rx_refill.work);
struct mhi_device *mdev = mbim->mdev;
int err;
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);
if (unlikely(!skb))
break;
err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
mbim->mru, MHI_EOT);
if (unlikely(err)) {
kfree_skb(skb);
break;
}
/* Do not hog the CPU if rx buffers are consumed faster than
* queued (unlikely).
*/
cond_resched();
}
/* If we're still starved of rx buffers, reschedule later */
if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
schedule_delayed_work(&mbim->rx_refill, HZ / 2);
}
static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = mhi_res->buf_addr;
int free_desc_count;
free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
if (unlikely(mhi_res->transaction_status)) {
switch (mhi_res->transaction_status) {
case -EOVERFLOW:
/* Packet has been split over multiple transfers */
skb_put(skb, mhi_res->bytes_xferd);
mhi_net_skb_agg(mbim, skb);
break;
case -ENOTCONN:
/* MHI layer stopping/resetting the DL channel */
dev_kfree_skb_any(skb);
return;
default:
/* Unknown error, simply drop */
dev_kfree_skb_any(skb);
}
} else {
skb_put(skb, mhi_res->bytes_xferd);
if (mbim->skbagg_head) {
/* Aggregate the final fragment */
skb = mhi_net_skb_agg(mbim, skb);
mbim->skbagg_head = NULL;
}
mhi_mbim_rx(mbim, skb);
}
	/* Refill if the RX buffer queue becomes low */
if (free_desc_count >= mbim->rx_queue_sz / 2)
schedule_delayed_work(&mbim->rx_refill, 0);
}
static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
unsigned int start;
do {
start = u64_stats_fetch_begin(&link->rx_syncp);
stats->rx_packets = u64_stats_read(&link->rx_packets);
stats->rx_bytes = u64_stats_read(&link->rx_bytes);
stats->rx_errors = u64_stats_read(&link->rx_errors);
} while (u64_stats_fetch_retry(&link->rx_syncp, start));
do {
start = u64_stats_fetch_begin(&link->tx_syncp);
stats->tx_packets = u64_stats_read(&link->tx_packets);
stats->tx_bytes = u64_stats_read(&link->tx_bytes);
stats->tx_errors = u64_stats_read(&link->tx_errors);
stats->tx_dropped = u64_stats_read(&link->tx_dropped);
} while (u64_stats_fetch_retry(&link->tx_syncp, start));
}
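/* For orientation: the fetch/retry loops above are the standard u64_stats
 * reader pattern; the writers in the rx/tx callbacks bracket their updates
 * with u64_stats_update_begin()/end(), and a reader simply retries when a
 * writer ran concurrently, along the lines of:
 *
 *	do {
 *		start = u64_stats_fetch_begin(&link->rx_syncp);
 *		pkts = u64_stats_read(&link->rx_packets);
 *	} while (u64_stats_fetch_retry(&link->rx_syncp, start));
 */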
static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
struct mhi_result *mhi_res)
{
struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
struct sk_buff *skb = mhi_res->buf_addr;
struct net_device *ndev = skb->dev;
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
/* Hardware has consumed the buffer, so free the skb (which is not
* freed by the MHI stack) and perform accounting.
*/
dev_consume_skb_any(skb);
u64_stats_update_begin(&link->tx_syncp);
if (unlikely(mhi_res->transaction_status)) {
/* MHI layer stopping/resetting the UL channel */
if (mhi_res->transaction_status == -ENOTCONN) {
u64_stats_update_end(&link->tx_syncp);
return;
}
u64_stats_inc(&link->tx_errors);
} else {
u64_stats_inc(&link->tx_packets);
u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
}
u64_stats_update_end(&link->tx_syncp);
if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
netif_wake_queue(ndev);
}
static int mhi_mbim_ndo_open(struct net_device *ndev)
{
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
/* Feed the MHI rx buffer pool */
schedule_delayed_work(&link->mbim->rx_refill, 0);
/* Carrier is established via out-of-band channel (e.g. qmi) */
netif_carrier_on(ndev);
netif_start_queue(ndev);
return 0;
}
static int mhi_mbim_ndo_stop(struct net_device *ndev)
{
netif_stop_queue(ndev);
netif_carrier_off(ndev);
return 0;
}
static const struct net_device_ops mhi_mbim_ndo = {
.ndo_open = mhi_mbim_ndo_open,
.ndo_stop = mhi_mbim_ndo_stop,
.ndo_start_xmit = mhi_mbim_ndo_xmit,
.ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
};
static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
struct netlink_ext_ack *extack)
{
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
struct mhi_mbim_context *mbim = ctxt;
link->session = if_id;
link->mbim = mbim;
link->ndev = ndev;
u64_stats_init(&link->rx_syncp);
u64_stats_init(&link->tx_syncp);
rcu_read_lock();
if (mhi_mbim_get_link_rcu(mbim, if_id)) {
rcu_read_unlock();
return -EEXIST;
}
rcu_read_unlock();
/* Already protected by RTNL lock */
hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);
return register_netdevice(ndev);
}
static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
struct list_head *head)
{
struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
hlist_del_init_rcu(&link->hlnode);
synchronize_rcu();
unregister_netdevice_queue(ndev, head);
}
static void mhi_mbim_setup(struct net_device *ndev)
{
ndev->header_ops = NULL; /* No header */
ndev->type = ARPHRD_RAWIP;
ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
ndev->hard_header_len = 0;
ndev->addr_len = 0;
ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
ndev->netdev_ops = &mhi_mbim_ndo;
ndev->mtu = MHI_MBIM_DEFAULT_MTU;
ndev->min_mtu = ETH_MIN_MTU;
ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
ndev->tx_queue_len = 1000;
ndev->needs_free_netdev = true;
}
static const struct wwan_ops mhi_mbim_wwan_ops = {
.priv_size = sizeof(struct mhi_mbim_link),
.setup = mhi_mbim_setup,
.newlink = mhi_mbim_newlink,
.dellink = mhi_mbim_dellink,
};
static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_mbim_context *mbim;
int err;
mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
if (!mbim)
return -ENOMEM;
spin_lock_init(&mbim->tx_lock);
dev_set_drvdata(&mhi_dev->dev, mbim);
mbim->mdev = mhi_dev;
mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;
INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);
/* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev);
if (err)
return err;
/* Number of transfer descriptors determines size of the queue */
mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
/* Register wwan link ops with MHI controller representing WWAN instance */
return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
}
static void mhi_mbim_remove(struct mhi_device *mhi_dev)
{
struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
mhi_unprepare_from_transfer(mhi_dev);
cancel_delayed_work_sync(&mbim->rx_refill);
wwan_unregister_ops(&cntrl->mhi_dev->dev);
kfree_skb(mbim->skbagg_head);
dev_set_drvdata(&mhi_dev->dev, NULL);
}
static const struct mhi_device_id mhi_mbim_id_table[] = {
/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
{ .chan = "IP_HW0_MBIM", .driver_data = 0 },
{}
};
MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);
static struct mhi_driver mhi_mbim_driver = {
.probe = mhi_mbim_probe,
.remove = mhi_mbim_remove,
.dl_xfer_cb = mhi_mbim_dl_callback,
.ul_xfer_cb = mhi_mbim_ul_callback,
.id_table = mhi_mbim_id_table,
.driver = {
.name = "mhi_wwan_mbim",
.owner = THIS_MODULE,
},
};
module_mhi_driver(mhi_mbim_driver);
MODULE_AUTHOR("Loic Poulain <[email protected]>");
MODULE_DESCRIPTION("Network/MBIM over MHI");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wwan/mhi_wwan_mbim.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include "iosm_ipc_mmio.h"
#include "iosm_ipc_mux.h"
/* Definition of MMIO offsets
* note that MMIO_CI offsets are relative to end of chip info structure
*/
/* MMIO chip info size in bytes */
#define MMIO_CHIP_INFO_SIZE 60
/* CP execution stage */
#define MMIO_OFFSET_EXECUTION_STAGE 0x00
/* Boot ROM Chip Info struct */
#define MMIO_OFFSET_CHIP_INFO 0x04
#define MMIO_OFFSET_ROM_EXIT_CODE 0x40
#define MMIO_OFFSET_PSI_ADDRESS 0x54
#define MMIO_OFFSET_PSI_SIZE 0x5C
#define MMIO_OFFSET_IPC_STATUS 0x60
#define MMIO_OFFSET_CONTEXT_INFO 0x64
#define MMIO_OFFSET_BASE_ADDR 0x6C
#define MMIO_OFFSET_END_ADDR 0x74
#define MMIO_OFFSET_CP_VERSION 0xF0
#define MMIO_OFFSET_CP_CAPABILITIES 0xF4
/* Number of polling retries (20 ms apart, so roughly one second in total)
 * to wait for the modem boot code to write a valid execution stage into
 * the mmio area
 */
#define IPC_MMIO_EXEC_STAGE_TIMEOUT 50
/* check if exec stage has one of the valid values */
static bool ipc_mmio_is_valid_exec_stage(enum ipc_mem_exec_stage stage)
{
switch (stage) {
case IPC_MEM_EXEC_STAGE_BOOT:
case IPC_MEM_EXEC_STAGE_PSI:
case IPC_MEM_EXEC_STAGE_EBL:
case IPC_MEM_EXEC_STAGE_RUN:
case IPC_MEM_EXEC_STAGE_CRASH:
case IPC_MEM_EXEC_STAGE_CD_READY:
return true;
default:
return false;
}
}
void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
{
u32 cp_cap;
unsigned int ver;
ver = ipc_mmio_get_cp_version(ipc_mmio);
cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
(UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
: MUX_LITE;
ipc_mmio->has_ul_flow_credit =
(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
}
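/* For orientation: the mux protocol choice is a simple capability gate. A CP
 * version of at least IOSM_CP_VERSION that advertises UL_AGGR or DL_AGGR
 * selects MUX_AGGREGATION; an older version, or a capability word without
 * the aggregation bits, falls back to MUX_LITE.
 */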
struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
{
struct iosm_mmio *ipc_mmio = kzalloc(sizeof(*ipc_mmio), GFP_KERNEL);
int retries = IPC_MMIO_EXEC_STAGE_TIMEOUT;
enum ipc_mem_exec_stage stage;
if (!ipc_mmio)
return NULL;
ipc_mmio->dev = dev;
ipc_mmio->base = mmio;
ipc_mmio->offset.exec_stage = MMIO_OFFSET_EXECUTION_STAGE;
/* Check for a valid execution stage to make sure that the boot code
* has correctly initialized the MMIO area.
*/
do {
stage = ipc_mmio_get_exec_stage(ipc_mmio);
if (ipc_mmio_is_valid_exec_stage(stage))
break;
msleep(20);
} while (retries-- > 0);
	/* Check the stage itself rather than the retry counter, which ends up
	 * at -1 on timeout and may legitimately reach 0 on a last-try success.
	 */
	if (!ipc_mmio_is_valid_exec_stage(stage)) {
dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
goto init_fail;
}
ipc_mmio->offset.chip_info = MMIO_OFFSET_CHIP_INFO;
/* read chip info size and version from chip info structure */
ipc_mmio->chip_info_version =
ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info);
/* Increment of 2 is needed as the size value in the chip info
 * excludes the version and size fields, which are always present
*/
ipc_mmio->chip_info_size =
ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info + 1) + 2;
if (ipc_mmio->chip_info_size != MMIO_CHIP_INFO_SIZE) {
dev_err(ipc_mmio->dev, "Unexpected Chip Info");
goto init_fail;
}
ipc_mmio->offset.rom_exit_code = MMIO_OFFSET_ROM_EXIT_CODE;
ipc_mmio->offset.psi_address = MMIO_OFFSET_PSI_ADDRESS;
ipc_mmio->offset.psi_size = MMIO_OFFSET_PSI_SIZE;
ipc_mmio->offset.ipc_status = MMIO_OFFSET_IPC_STATUS;
ipc_mmio->offset.context_info = MMIO_OFFSET_CONTEXT_INFO;
ipc_mmio->offset.ap_win_base = MMIO_OFFSET_BASE_ADDR;
ipc_mmio->offset.ap_win_end = MMIO_OFFSET_END_ADDR;
ipc_mmio->offset.cp_version = MMIO_OFFSET_CP_VERSION;
ipc_mmio->offset.cp_capability = MMIO_OFFSET_CP_CAPABILITIES;
return ipc_mmio;
init_fail:
kfree(ipc_mmio);
return NULL;
}
enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio)
{
if (!ipc_mmio)
return IPC_MEM_EXEC_STAGE_INVALID;
return (enum ipc_mem_exec_stage)ioread32(ipc_mmio->base +
ipc_mmio->offset.exec_stage);
}
void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
size_t size)
{
if (ipc_mmio && dest)
memcpy_fromio(dest, ipc_mmio->base + ipc_mmio->offset.chip_info,
size);
}
enum ipc_mem_device_ipc_state ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio)
{
if (!ipc_mmio)
return IPC_MEM_DEVICE_IPC_INVALID;
return (enum ipc_mem_device_ipc_state)ioread32(ipc_mmio->base +
ipc_mmio->offset.ipc_status);
}
enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
{
if (!ipc_mmio)
return IMEM_ROM_EXIT_FAIL;
return (enum rom_exit_code)ioread32(ipc_mmio->base +
ipc_mmio->offset.rom_exit_code);
}
void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
{
if (!ipc_mmio)
return;
	/* AP memory window: writing 0 to base and end keeps the full window
	 * open and active, i.e. the modem does not range-check AP addresses.
	 */
iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
iowrite64(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
iowrite64(ipc_mmio->context_info_addr,
ipc_mmio->base + ipc_mmio->offset.context_info);
}
void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
u32 size)
{
if (!ipc_mmio)
return;
iowrite64(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
iowrite32(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
}
void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
{
if (!ipc_mmio)
return;
/* store context_info address. This will be stored in the mmio area
* during IPC_MEM_DEVICE_IPC_INIT state via ipc_mmio_config()
*/
ipc_mmio->context_info_addr = addr;
}
int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio)
{
if (ipc_mmio)
return ioread32(ipc_mmio->base + ipc_mmio->offset.cp_version);
return -EFAULT;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_mmio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include <linux/debugfs.h>
#include <linux/wwan.h>
#include "iosm_ipc_imem.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"
void ipc_debugfs_init(struct iosm_imem *ipc_imem)
{
ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
ipc_imem->debugfs_wwan_dir);
ipc_imem->trace = ipc_trace_init(ipc_imem);
if (!ipc_imem->trace)
dev_warn(ipc_imem->dev, "trace channel init failed");
}
void ipc_debugfs_deinit(struct iosm_imem *ipc_imem)
{
ipc_trace_deinit(ipc_imem->trace);
debugfs_remove_recursive(ipc_imem->debugfs_dir);
wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_protocol.h"
/* Timeout value in MS for the PM to wait for device to reach active state */
#define IPC_PM_ACTIVE_TIMEOUT_MS (500)
/* Note that here "active" has the value 1, as compared to the enums
* ipc_mem_host_pm_state or ipc_mem_dev_pm_state, where "active" is 0
*/
#define IPC_PM_SLEEP (0)
#define CONSUME_STATE (0)
#define IPC_PM_ACTIVE (1)
void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
bool host_slp_check)
{
if (host_slp_check && ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE &&
ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE_WAIT) {
ipc_pm->pending_hpda_update = true;
dev_dbg(ipc_pm->dev,
"Pend HPDA update set. Host PM_State: %d identifier:%d",
ipc_pm->host_pm_state, identifier);
return;
}
if (!ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true)) {
ipc_pm->pending_hpda_update = true;
dev_dbg(ipc_pm->dev, "Pending HPDA update set. identifier:%d",
identifier);
return;
}
ipc_pm->pending_hpda_update = false;
/* Trigger the irq towards CP */
ipc_cp_irq_hpda_update(ipc_pm->pcie, identifier);
ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, false);
}
/* Wake up the device if it is in low power mode. */
static bool ipc_pm_link_activate(struct iosm_pm *ipc_pm)
{
if (ipc_pm->cp_state == IPC_MEM_DEV_PM_ACTIVE)
return true;
if (ipc_pm->cp_state == IPC_MEM_DEV_PM_SLEEP) {
if (ipc_pm->ap_state == IPC_MEM_DEV_PM_SLEEP) {
/* Wake up the device. */
ipc_cp_irq_sleep_control(ipc_pm->pcie,
IPC_MEM_DEV_PM_WAKEUP);
ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE_WAIT;
goto not_active;
}
if (ipc_pm->ap_state == IPC_MEM_DEV_PM_ACTIVE_WAIT)
goto not_active;
return true;
}
not_active:
/* link is not ready */
return false;
}
bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm)
{
bool ret_val = false;
if (ipc_pm->ap_state != IPC_MEM_DEV_PM_ACTIVE) {
/* Complete all memory stores before setting bit */
smp_mb__before_atomic();
/* Wait for IPC_PM_ACTIVE_TIMEOUT_MS for Device sleep state
* machine to enter ACTIVE state.
*/
set_bit(0, &ipc_pm->host_sleep_pend);
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
if (!wait_for_completion_interruptible_timeout
(&ipc_pm->host_sleep_complete,
msecs_to_jiffies(IPC_PM_ACTIVE_TIMEOUT_MS))) {
dev_err(ipc_pm->dev,
"PM timeout. Expected State:%d. Actual: %d",
IPC_MEM_DEV_PM_ACTIVE, ipc_pm->ap_state);
goto active_timeout;
}
}
ret_val = true;
active_timeout:
/* Complete all memory stores before clearing bit */
smp_mb__before_atomic();
/* Reset the atomic variable in any case as device sleep
* state machine change is no longer of interest.
*/
clear_bit(0, &ipc_pm->host_sleep_pend);
/* Complete all memory stores after clearing bit */
smp_mb__after_atomic();
return ret_val;
}
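/* For orientation: the host-sleep handshake pairs a pending bit with a
 * completion. Bit 0 of host_sleep_pend marks a waiter, ipc_pm_on_link_wake()
 * completes host_sleep_complete once CP acknowledges the ACTIVE transition,
 * and the barriers keep the bit updates ordered against the state stores
 * observed by the interrupt path.
 */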
static void ipc_pm_on_link_sleep(struct iosm_pm *ipc_pm)
{
/* pending sleep ack and all conditions are cleared
* -> signal SLEEP__ACK to CP
*/
ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_SLEEP);
}
static void ipc_pm_on_link_wake(struct iosm_pm *ipc_pm, bool ack)
{
ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
if (ack) {
ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_ACTIVE);
/* check the consume state !!! */
if (test_bit(CONSUME_STATE, &ipc_pm->host_sleep_pend))
complete(&ipc_pm->host_sleep_complete);
}
	/* Check for a pending HPDA update. An HP update may be pending either
	 * because a message send was put on hold while the device was asleep,
	 * or because a TD update was deferred by the device and host sleep
	 * states.
	 */
if (ipc_pm->pending_hpda_update &&
ipc_pm->host_pm_state == IPC_MEM_HOST_PM_ACTIVE)
ipc_pm_signal_hpda_doorbell(ipc_pm, IPC_HP_PM_TRIGGER, true);
}
bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active)
{
union ipc_pm_cond old_cond;
union ipc_pm_cond new_cond;
bool link_active;
/* Save the current D3 state. */
new_cond = ipc_pm->pm_cond;
old_cond = ipc_pm->pm_cond;
/* Calculate the power state only in the runtime phase. */
switch (unit) {
case IPC_PM_UNIT_IRQ: /* CP irq */
new_cond.irq = active;
break;
case IPC_PM_UNIT_LINK: /* Device link state. */
new_cond.link = active;
break;
case IPC_PM_UNIT_HS: /* Host sleep trigger requires Link. */
new_cond.hs = active;
break;
default:
break;
}
/* Something changed ? */
if (old_cond.raw == new_cond.raw) {
/* Stay in the current PM state. */
link_active = old_cond.link == IPC_PM_ACTIVE;
goto ret;
}
ipc_pm->pm_cond = new_cond;
if (new_cond.link)
ipc_pm_on_link_wake(ipc_pm, unit == IPC_PM_UNIT_LINK);
else if (unit == IPC_PM_UNIT_LINK)
ipc_pm_on_link_sleep(ipc_pm);
if (old_cond.link == IPC_PM_SLEEP && new_cond.raw) {
link_active = ipc_pm_link_activate(ipc_pm);
goto ret;
}
link_active = old_cond.link == IPC_PM_ACTIVE;
ret:
return link_active;
}
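/* For orientation: pm_cond is a small condition word with one bit per unit
 * (irq, link, hs) that is also readable as a raw value. An all-zero word
 * means nothing blocks device sleep; when the link was asleep and any unit
 * newly becomes active, ipc_pm_link_activate() is asked to wake the device,
 * and the return value reports whether the link is usable right now.
 */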
bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm)
{
/* suspend not allowed if host_pm_state is not IPC_MEM_HOST_PM_ACTIVE */
if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE) {
dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
ipc_pm->host_pm_state, IPC_MEM_HOST_PM_ACTIVE);
return false;
}
ipc_pm->host_pm_state = IPC_MEM_HOST_PM_SLEEP_WAIT_D3;
return true;
}
bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm)
{
if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_SLEEP) {
dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
ipc_pm->host_pm_state, IPC_MEM_HOST_PM_SLEEP);
return false;
}
/* Sending Sleep Exit message to CP. Update the state */
ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE_WAIT;
return true;
}
void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep)
{
if (sleep) {
ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_SLEEP;
} else {
ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_ACTIVE;
ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
}
}
bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm, u32 cp_pm_req)
{
if (cp_pm_req == ipc_pm->device_sleep_notification)
return false;
ipc_pm->device_sleep_notification = cp_pm_req;
/* Evaluate the PM request. */
switch (ipc_pm->cp_state) {
case IPC_MEM_DEV_PM_ACTIVE:
switch (cp_pm_req) {
case IPC_MEM_DEV_PM_ACTIVE:
break;
case IPC_MEM_DEV_PM_SLEEP:
/* Inform the PM that the device link can go down. */
ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, false);
return true;
default:
dev_err(ipc_pm->dev,
"loc-pm=%d active: confused req-pm=%d",
ipc_pm->cp_state, cp_pm_req);
break;
}
break;
case IPC_MEM_DEV_PM_SLEEP:
switch (cp_pm_req) {
case IPC_MEM_DEV_PM_ACTIVE:
/* Inform the PM that the device link is active. */
ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, true);
break;
case IPC_MEM_DEV_PM_SLEEP:
break;
default:
dev_err(ipc_pm->dev,
"loc-pm=%d sleep: confused req-pm=%d",
ipc_pm->cp_state, cp_pm_req);
break;
}
break;
default:
dev_err(ipc_pm->dev, "confused loc-pm=%d, req-pm=%d",
ipc_pm->cp_state, cp_pm_req);
break;
}
return false;
}
void ipc_pm_init(struct iosm_protocol *ipc_protocol)
{
struct iosm_imem *ipc_imem = ipc_protocol->imem;
struct iosm_pm *ipc_pm = &ipc_protocol->pm;
ipc_pm->pcie = ipc_imem->pcie;
ipc_pm->dev = ipc_imem->dev;
ipc_pm->pm_cond.irq = IPC_PM_SLEEP;
ipc_pm->pm_cond.hs = IPC_PM_SLEEP;
ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
/* Create generic wait-for-completion handler for Host Sleep
* and device sleep coordination.
*/
init_completion(&ipc_pm->host_sleep_complete);
/* Complete all memory stores before clearing bit */
smp_mb__before_atomic();
clear_bit(0, &ipc_pm->host_sleep_pend);
/* Complete all memory stores after clearing bit */
smp_mb__after_atomic();
}
void ipc_pm_deinit(struct iosm_protocol *proto)
{
struct iosm_pm *ipc_pm = &proto->pm;
complete(&ipc_pm->host_sleep_complete);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_pm.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
/* open logical channel for control communication */
static int ipc_port_ctrl_start(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret = 0;
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
ipc_port->chl_id,
IPC_HP_CDEV_OPEN);
if (!ipc_port->channel)
ret = -EIO;
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
return ret;
}
/* close logical channel */
static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
}
/* transfer control data to modem */
static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret;
pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ret = ipc_imem_sys_cdev_write(ipc_port, skb);
pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
return ret;
}
static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
.start = ipc_port_ctrl_start,
.stop = ipc_port_ctrl_stop,
.tx = ipc_port_ctrl_tx,
};
/* Port init func */
struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
struct ipc_chnl_cfg ipc_port_cfg)
{
struct iosm_cdev *ipc_port = kzalloc(sizeof(*ipc_port), GFP_KERNEL);
enum wwan_port_type port_type = ipc_port_cfg.wwan_port_type;
enum ipc_channel_id chl_id = ipc_port_cfg.id;
if (!ipc_port)
return NULL;
ipc_port->dev = ipc_imem->dev;
ipc_port->pcie = ipc_imem->pcie;
ipc_port->port_type = port_type;
ipc_port->chl_id = chl_id;
ipc_port->ipc_imem = ipc_imem;
ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
&ipc_wwan_ctrl_ops, NULL,
ipc_port);
return ipc_port;
}
/* Port deinit func */
void ipc_port_deinit(struct iosm_cdev *port[])
{
struct iosm_cdev *ipc_port;
u8 ctrl_chl_nr;
for (ctrl_chl_nr = 0; ctrl_chl_nr < IPC_MEM_MAX_CHANNELS;
ctrl_chl_nr++) {
if (port[ctrl_chl_nr]) {
ipc_port = port[ctrl_chl_nr];
wwan_remove_port(ipc_port->iosm_port);
kfree(ipc_port);
}
}
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_port.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_wwan.h"
#define IOSM_IP_TYPE_MASK 0xF0
#define IOSM_IP_TYPE_IPV4 0x40
#define IOSM_IP_TYPE_IPV6 0x60
/**
* struct iosm_netdev_priv - netdev WWAN driver specific private data
* @ipc_wwan: Pointer to iosm_wwan struct
* @netdev: Pointer to network interface device structure
* @if_id: Interface id for device.
* @ch_id: IPC channel number for which interface device is created.
*/
struct iosm_netdev_priv {
struct iosm_wwan *ipc_wwan;
struct net_device *netdev;
int if_id;
int ch_id;
};
/**
* struct iosm_wwan - This structure contains information about WWAN root device
* and interface to the IPC layer.
* @ipc_imem: Pointer to imem data-struct
* @sub_netlist: List of active netdevs
* @dev: Pointer device structure
*/
struct iosm_wwan {
struct iosm_imem *ipc_imem;
struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
struct device *dev;
};
/* Bring-up the wwan net link */
static int ipc_wwan_link_open(struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
int ret = 0;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
if (priv->ch_id < 0) {
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
ret = -ENODEV;
goto err_out;
}
/* enable tx path, DL data may follow */
netif_start_queue(netdev);
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
err_out:
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
return ret;
}
/* Bring-down the wwan net link */
static int ipc_wwan_link_stop(struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
netif_stop_queue(netdev);
pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
return 0;
}
/* Transmit a packet */
static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
unsigned int len = skb->len;
int if_id = priv->if_id;
int ret;
/* Interface IDs from 1 to 8 are for IP data
* & from 257 to 261 are for non-IP data
*/
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
pm_runtime_get(ipc_wwan->ipc_imem->dev);
/* Send the SKB to device for transmission */
ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
if_id, priv->ch_id, skb);
/* Return code of zero is success */
if (ret == 0) {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += len;
ret = NETDEV_TX_OK;
} else if (ret == -EBUSY) {
ret = NETDEV_TX_BUSY;
dev_err(ipc_wwan->dev, "unable to push packets");
} else {
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
goto exit;
}
pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
return ret;
exit:
/* Log any skb drop */
if (if_id)
dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
ret);
dev_kfree_skb_any(skb);
netdev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
/* Ops structure for wwan net link */
static const struct net_device_ops ipc_inm_ops = {
.ndo_open = ipc_wwan_link_open,
.ndo_stop = ipc_wwan_link_stop,
.ndo_start_xmit = ipc_wwan_link_transmit,
};
/* Setup function for creating new net link */
static void ipc_wwan_setup(struct net_device *iosm_dev)
{
iosm_dev->header_ops = NULL;
iosm_dev->hard_header_len = 0;
iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
iosm_dev->type = ARPHRD_NONE;
iosm_dev->mtu = ETH_DATA_LEN;
iosm_dev->min_mtu = ETH_MIN_MTU;
iosm_dev->max_mtu = ETH_MAX_MTU;
iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
iosm_dev->needs_free_netdev = true;
iosm_dev->netdev_ops = &ipc_inm_ops;
}
/* Create new wwan net link */
static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
u32 if_id, struct netlink_ext_ack *extack)
{
struct iosm_wwan *ipc_wwan = ctxt;
struct iosm_netdev_priv *priv;
int err;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
priv = wwan_netdev_drvpriv(dev);
priv->if_id = if_id;
priv->netdev = dev;
priv->ipc_wwan = ipc_wwan;
if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id]))
return -EBUSY;
err = register_netdevice(dev);
if (err)
return err;
rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
netif_device_attach(dev);
return 0;
}
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
struct list_head *head)
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
struct iosm_wwan *ipc_wwan = ctxt;
int if_id = priv->if_id;
if (WARN_ON(if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
return;
if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
return;
RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
/* unregistering includes synchronize_net() */
unregister_netdevice_queue(dev, head);
}
static const struct wwan_ops iosm_wwan_ops = {
.priv_size = sizeof(struct iosm_netdev_priv),
.setup = ipc_wwan_setup,
.newlink = ipc_wwan_newlink,
.dellink = ipc_wwan_dellink,
};
int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
bool dss, int if_id)
{
struct sk_buff *skb = skb_arg;
struct net_device_stats *stats;
struct iosm_netdev_priv *priv;
int ret;
if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
skb->protocol = htons(ETH_P_IP);
else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
IOSM_IP_TYPE_IPV6)
skb->protocol = htons(ETH_P_IPV6);
skb->pkt_type = PACKET_HOST;
if (if_id < IP_MUX_SESSION_START ||
if_id > IP_MUX_SESSION_END) {
ret = -EINVAL;
goto free;
}
rcu_read_lock();
priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
if (!priv) {
ret = -EINVAL;
goto unlock;
}
skb->dev = priv->netdev;
stats = &priv->netdev->stats;
stats->rx_packets++;
stats->rx_bytes += skb->len;
ret = netif_rx(skb);
skb = NULL;
unlock:
rcu_read_unlock();
free:
dev_kfree_skb(skb);
return ret;
}
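/* For orientation: the RCU read lock only protects the sub_netlist lookup.
 * Once netif_rx() has taken ownership of the buffer, the local skb pointer
 * is cleared so the shared "free:" exit path does not free a packet that now
 * belongs to the network stack.
 */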
void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
{
struct net_device *netdev;
struct iosm_netdev_priv *priv;
bool is_tx_blk;
rcu_read_lock();
priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
if (!priv) {
rcu_read_unlock();
return;
}
netdev = priv->netdev;
is_tx_blk = netif_queue_stopped(netdev);
if (on)
dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
if_id);
if (on && !is_tx_blk)
netif_stop_queue(netdev);
else if (!on && is_tx_blk)
netif_wake_queue(netdev);
rcu_read_unlock();
}
struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
{
struct iosm_wwan *ipc_wwan;
ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
if (!ipc_wwan)
return NULL;
ipc_wwan->dev = dev;
ipc_wwan->ipc_imem = ipc_imem;
/* WWAN core will create a netdev for the default IP MUX channel */
if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
IP_MUX_SESSION_DEFAULT)) {
kfree(ipc_wwan);
return NULL;
}
return ipc_wwan;
}
void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
/* This call will remove all child netdev(s) */
wwan_unregister_ops(ipc_wwan->dev);
kfree(ipc_wwan);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_wwan.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include <linux/vmalloc.h>
#include "iosm_ipc_coredump.h"
/**
* ipc_coredump_collect - To collect coredump
* @devlink: Pointer to devlink instance.
* @data: Pointer to snapshot
* @entry: ID of requested snapshot
* @region_size: Region size
*
* Returns: 0 on success, error on failure
*/
int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
u32 region_size)
{
int ret, bytes_to_read, bytes_read = 0, i = 0;
s32 remaining;
u8 *data_ptr;
data_ptr = vmalloc(region_size);
if (!data_ptr)
return -ENOMEM;
remaining = devlink->cd_file_info[entry].actual_size;
ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry);
if (ret) {
dev_err(devlink->dev, "Send coredump_get cmd failed");
goto get_cd_fail;
}
while (remaining > 0) {
bytes_to_read = min(remaining, MAX_DATA_SIZE);
bytes_read = 0;
ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i,
bytes_to_read, &bytes_read);
if (ret) {
dev_err(devlink->dev, "CD data read failed");
goto get_cd_fail;
}
remaining -= bytes_read;
i += bytes_read;
}
*data = data_ptr;
return 0;
get_cd_fail:
vfree(data_ptr);
return ret;
}
/**
* ipc_coredump_get_list - Get coredump list from modem
* @devlink: Pointer to devlink instance.
* @cmd: RPSI command to be sent
*
* Returns: 0 on success, error on failure
*/
int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd)
{
u32 byte_read, num_entries, file_size;
struct iosm_cd_table *cd_table;
u8 size[MAX_SIZE_LEN], i;
char *filename;
int ret;
cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL);
if (!cd_table) {
ret = -ENOMEM;
goto cd_init_fail;
}
ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE);
if (ret) {
dev_err(devlink->dev, "rpsi_cmd_coredump_start failed");
goto cd_init_fail;
}
ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table,
MAX_CD_LIST_SIZE, &byte_read);
if (ret) {
dev_err(devlink->dev, "Coredump data is invalid");
goto cd_init_fail;
}
	if (byte_read != MAX_CD_LIST_SIZE) {
		/* Short read: report an error rather than returning the stale
		 * success code from the preceding read.
		 */
		ret = -EINVAL;
		goto cd_init_fail;
	}
if (cmd == rpsi_cmd_coredump_start) {
num_entries = le32_to_cpu(cd_table->list.num_entries);
if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) {
ret = -EINVAL;
goto cd_init_fail;
}
for (i = 0; i < num_entries; i++) {
file_size = le32_to_cpu(cd_table->list.entry[i].size);
filename = cd_table->list.entry[i].filename;
if (file_size > devlink->cd_file_info[i].default_size) {
ret = -EINVAL;
goto cd_init_fail;
}
devlink->cd_file_info[i].actual_size = file_size;
dev_dbg(devlink->dev, "file: %s actual size %d",
filename, file_size);
devlink_flash_update_status_notify(devlink->devlink_ctx,
filename,
"FILENAME", 0, 0);
snprintf(size, sizeof(size), "%d", file_size);
devlink_flash_update_status_notify(devlink->devlink_ctx,
size, "FILE SIZE",
0, 0);
}
}
cd_init_fail:
kfree(cd_table);
return ret;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_coredump.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_pcie.h"
#include "iosm_ipc_protocol.h"
static void ipc_write_dbell_reg(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
{
void __iomem *write_reg;
	/* Select the first doorbell register; it is currently the only one
	 * needed by CP.
	 */
write_reg = (void __iomem *)((u8 __iomem *)ipc_pcie->ipc_regs +
ipc_pcie->doorbell_write +
(irq_n * ipc_pcie->doorbell_reg_offset));
/* Fire the doorbell irq by writing data on the doorbell write pointer
* register.
*/
iowrite32(data, write_reg);
}
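/* For orientation: the doorbell address resolves to
 *	ipc_regs + doorbell_write + irq_n * doorbell_reg_offset
 * so with the defaults set in ipc_pcie_config_init() irq_n simply indexes
 * one of the equally spaced per-channel write-pointer registers in the
 * doorbell BAR.
 */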
void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
{
ipc_write_dbell_reg(ipc_pcie, irq_n, data);
}
/* Threaded Interrupt handler for MSI interrupts */
static irqreturn_t ipc_msi_interrupt(int irq, void *dev_id)
{
struct iosm_pcie *ipc_pcie = dev_id;
int instance = irq - ipc_pcie->pci->irq;
/* Shift the MSI irq actions to the IPC tasklet. IRQ_NONE means the
* irq was not from the IPC device or could not be served.
*/
if (instance >= ipc_pcie->nvec)
return IRQ_NONE;
if (!test_bit(0, &ipc_pcie->suspend))
ipc_imem_irq_process(ipc_pcie->imem, instance);
return IRQ_HANDLED;
}
void ipc_release_irq(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *pdev = ipc_pcie->pci;
if (pdev->msi_enabled) {
while (--ipc_pcie->nvec >= 0)
free_irq(pdev->irq + ipc_pcie->nvec, ipc_pcie);
}
pci_free_irq_vectors(pdev);
}
int ipc_acquire_irq(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *pdev = ipc_pcie->pci;
int i, rc = -EINVAL;
ipc_pcie->nvec = pci_alloc_irq_vectors(pdev, IPC_MSI_VECTORS,
IPC_MSI_VECTORS, PCI_IRQ_MSI);
if (ipc_pcie->nvec < 0) {
rc = ipc_pcie->nvec;
goto error;
}
if (!pdev->msi_enabled)
goto error;
for (i = 0; i < ipc_pcie->nvec; ++i) {
rc = request_threaded_irq(pdev->irq + i, NULL,
ipc_msi_interrupt, IRQF_ONESHOT,
KBUILD_MODNAME, ipc_pcie);
if (rc) {
dev_err(ipc_pcie->dev, "unable to grab IRQ, rc=%d", rc);
ipc_pcie->nvec = i;
ipc_release_irq(ipc_pcie);
goto error;
}
}
error:
return rc;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_irq.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
#include "iosm_ipc_pcie.h"
#include "iosm_ipc_protocol.h"
MODULE_DESCRIPTION("IOSM Driver");
MODULE_LICENSE("GPL v2");
/* WWAN GUID */
static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
{
/* Free the MSI resources. */
ipc_release_irq(ipc_pcie);
/* Free mapped doorbell scratchpad bus memory into CPU space. */
iounmap(ipc_pcie->scratchpad);
/* Free mapped IPC_REGS bus memory into CPU space. */
iounmap(ipc_pcie->ipc_regs);
/* Releases all PCI I/O and memory resources previously reserved by a
* successful call to pci_request_regions. Call this function only
* after all use of the PCI regions has ceased.
*/
pci_release_regions(ipc_pcie->pci);
}
static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
{
/* Free the shared memory resources. */
ipc_imem_cleanup(ipc_pcie->imem);
ipc_pcie_resources_release(ipc_pcie);
/* Signal to the system that the PCI device is not in use. */
pci_disable_device(ipc_pcie->pci);
}
static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
{
kfree(ipc_pcie->imem);
kfree(ipc_pcie);
}
static void ipc_pcie_remove(struct pci_dev *pci)
{
struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);
ipc_pcie_cleanup(ipc_pcie);
ipc_pcie_deinit(ipc_pcie);
}
static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *pci = ipc_pcie->pci;
u32 cap = 0;
u32 ret;
/* Reserved PCI I/O and memory resources.
* Mark all PCI regions associated with PCI device pci as
* being reserved by owner IOSM_IPC.
*/
ret = pci_request_regions(pci, "IOSM_IPC");
if (ret) {
dev_err(ipc_pcie->dev, "failed pci request regions");
goto pci_request_region_fail;
}
/* Reserve the doorbell IPC REGS memory resources.
* Remap the memory into CPU space. Arrange for the physical address
* (BAR) to be visible from this driver.
* pci_ioremap_bar() ensures that the memory is marked uncachable.
*/
ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
if (!ipc_pcie->ipc_regs) {
dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
ret = -EBUSY;
goto ipc_regs_remap_fail;
}
/* Reserve the MMIO scratchpad memory resources.
* Remap the memory into CPU space. Arrange for the physical address
* (BAR) to be visible from this driver.
* pci_ioremap_bar() ensures that the memory is marked uncachable.
*/
ipc_pcie->scratchpad =
pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
if (!ipc_pcie->scratchpad) {
dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
ret = -EBUSY;
goto scratch_remap_fail;
}
/* Install the irq handler triggered by CP. */
ret = ipc_acquire_irq(ipc_pcie);
if (ret) {
dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
goto irq_acquire_fail;
}
/* Enable bus-mastering for the IOSM IPC device. */
pci_set_master(pci);
/* Enable LTR if possible
* This is needed for L1.2!
*/
pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
if (cap & PCI_EXP_DEVCAP2_LTR)
pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_LTR_EN);
dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
return ret;
irq_acquire_fail:
iounmap(ipc_pcie->scratchpad);
scratch_remap_fail:
iounmap(ipc_pcie->ipc_regs);
ipc_regs_remap_fail:
pci_release_regions(pci);
pci_request_region_fail:
return ret;
}
bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
bool parent)
{
struct pci_dev *pdev;
u16 value = 0;
u32 enabled;
if (parent)
pdev = ipc_pcie->pci->bus->self;
else
pdev = ipc_pcie->pci;
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
enabled = value & PCI_EXP_LNKCTL_ASPMC;
dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
enabled == PCI_EXP_LNKCTL_ASPMC);
}
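/* For orientation: PCI_EXP_LNKCTL_ASPMC is the two-bit ASPM control field of
 * Link Control. The check above reports L1 as enabled when that field reads
 * either "L1 only" (PCI_EXP_LNKCTL_ASPM_L1) or "L0s and L1" (both bits set,
 * i.e. the full ASPMC mask).
 */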
bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
{
struct pci_dev *parent;
u16 link_status = 0;
if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
dev_err(ipc_pcie->dev, "root port not found");
return false;
}
parent = ipc_pcie->pci->bus->self;
pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
return link_status & PCI_EXP_LNKSTA_DLLLA;
}
static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
bool parent)
{
struct pci_dev *pdev;
u32 support;
u32 cap = 0;
if (parent)
pdev = ipc_pcie->pci->bus->self;
else
pdev = ipc_pcie->pci;
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
if (support < PCI_EXP_LNKCTL_ASPM_L1) {
dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
pdev->device);
return false;
}
return true;
}
void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
{
bool parent_aspm_enabled, dev_aspm_enabled;
/* check if both root port and child supports ASPM L1 */
if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
!ipc_pcie_check_aspm_supported(ipc_pcie, false))
return;
parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
parent_aspm_enabled ? "Enabled" : "Disabled",
dev_aspm_enabled ? "Enabled" : "Disabled");
}
/* Initializes PCIe endpoint configuration */
static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
{
/* BAR0 is used for doorbell */
ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;
/* update HW configuration */
ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
}
/* This will read the BIOS WWAN RTD3 settings:
* D0L1.2/D3L2/Disabled
*/
static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
{
enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12;
union acpi_object *object;
acpi_handle handle_acpi;
handle_acpi = ACPI_HANDLE(dev);
if (!handle_acpi) {
pr_debug("pci device is NOT ACPI supporting device\n");
goto default_ret;
}
object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
if (!object)
goto default_ret;
if (object->integer.value == 3)
sleep_state = IPC_PCIE_D3L2;
ACPI_FREE(object);
default_ret:
return sleep_state;
}
static int ipc_pcie_probe(struct pci_dev *pci,
const struct pci_device_id *pci_id)
{
struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
int ret;
pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
pci_id->vendor);
if (!ipc_pcie)
goto ret_fail;
/* Initialize ipc dbg component for the PCIe device */
ipc_pcie->dev = &pci->dev;
/* Set the driver specific data. */
pci_set_drvdata(pci, ipc_pcie);
/* Save the address of the PCI device configuration. */
ipc_pcie->pci = pci;
/* Update platform configuration */
ipc_pcie_config_init(ipc_pcie);
/* Initialize the device before it is used. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
*/
if (pci_enable_device(pci)) {
dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
		/* If enabling the PCIe device has failed, calling
		 * ipc_pcie_cleanup() would panic the system; moreover,
		 * ipc_pcie_cleanup() must only be called after
		 * ipc_imem_mount().
		 */
goto pci_enable_fail;
}
ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret);
goto set_mask_fail;
}
ipc_pcie_config_aspm(ipc_pcie);
dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
/* Read WWAN RTD3 BIOS Setting
*/
ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);
ipc_pcie->suspend = 0;
if (ipc_pcie_resources_request(ipc_pcie))
goto resources_req_fail;
/* Establish the link to the imem layer. */
ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
ipc_pcie->scratchpad, ipc_pcie->dev);
if (!ipc_pcie->imem) {
dev_err(ipc_pcie->dev, "failed to init imem");
goto imem_init_fail;
}
return 0;
imem_init_fail:
ipc_pcie_resources_release(ipc_pcie);
resources_req_fail:
set_mask_fail:
pci_disable_device(pci);
pci_enable_fail:
kfree(ipc_pcie);
ret_fail:
return -EIO;
}
static const struct pci_device_id iosm_ipc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
/* Enter sleep in s2idle case
*/
static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
{
ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);
/* Complete all memory stores before setting bit */
smp_mb__before_atomic();
set_bit(0, &ipc_pcie->suspend);
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);
return 0;
}
/* Resume from sleep in s2idle case
*/
static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
{
ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);
ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);
/* Complete all memory stores before clearing bit. */
smp_mb__before_atomic();
clear_bit(0, &ipc_pcie->suspend);
/* Complete all memory stores after clearing bit. */
smp_mb__after_atomic();
return 0;
}
int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
{
/* The HAL shall ask the shared memory layer whether D3 is allowed. */
ipc_imem_pm_suspend(ipc_pcie->imem);
dev_dbg(ipc_pcie->dev, "SUSPEND done");
return 0;
}
int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
{
/* The HAL shall inform the shared memory layer that the device is
* active.
*/
ipc_imem_pm_resume(ipc_pcie->imem);
dev_dbg(ipc_pcie->dev, "RESUME done");
return 0;
}
static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
{
struct iosm_pcie *ipc_pcie;
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
ipc_pcie = pci_get_drvdata(pdev);
switch (ipc_pcie->d3l2_support) {
case IPC_PCIE_D0L12:
ipc_pcie_suspend_s2idle(ipc_pcie);
break;
case IPC_PCIE_D3L2:
ipc_pcie_suspend(ipc_pcie);
break;
}
return 0;
}
static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
{
struct iosm_pcie *ipc_pcie;
struct pci_dev *pdev;
pdev = to_pci_dev(dev);
ipc_pcie = pci_get_drvdata(pdev);
switch (ipc_pcie->d3l2_support) {
case IPC_PCIE_D0L12:
ipc_pcie_resume_s2idle(ipc_pcie);
break;
case IPC_PCIE_D3L2:
ipc_pcie_resume(ipc_pcie);
break;
}
return 0;
}
static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
ipc_pcie_resume_cb, NULL);
static struct pci_driver iosm_ipc_driver = {
.name = KBUILD_MODNAME,
.probe = ipc_pcie_probe,
.remove = ipc_pcie_remove,
.driver = {
.pm = &iosm_ipc_pm,
},
.id_table = iosm_ipc_ids,
};
module_pci_driver(iosm_ipc_driver);
int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
size_t size, dma_addr_t *mapping, int direction)
{
if (ipc_pcie->pci) {
*mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
direction);
if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
dev_err(ipc_pcie->dev, "dma mapping failed");
return -EINVAL;
}
}
return 0;
}
void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
dma_addr_t mapping, int direction)
{
if (!mapping)
return;
if (ipc_pcie->pci)
dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
}
struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
gfp_t flags, size_t size)
{
struct sk_buff *skb;
if (!ipc_pcie || !size) {
pr_err("invalid pcie object or size");
return NULL;
}
skb = __netdev_alloc_skb(NULL, size, flags);
if (!skb)
return NULL;
IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
IPC_CB(skb)->mapping = 0;
return skb;
}
struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
gfp_t flags, dma_addr_t *mapping,
int direction, size_t headroom)
{
struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
size + headroom);
if (!skb)
return NULL;
if (headroom)
skb_reserve(skb, headroom);
if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
dev_kfree_skb(skb);
return NULL;
}
BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
/* Store the mapping address in skb scratch pad for later usage */
IPC_CB(skb)->mapping = *mapping;
IPC_CB(skb)->direction = direction;
IPC_CB(skb)->len = size;
return skb;
}
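/* For orientation: the DMA mapping, direction and mapped length are stashed
 * in the skb control block (IPC_CB) so ipc_pcie_kfree_skb() can unmap the
 * buffer later without extra bookkeeping; the BUILD_BUG_ON above guards that
 * this metadata still fits into skb->cb.
 */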
void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
{
if (!skb)
return;
ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
IPC_CB(skb)->direction);
IPC_CB(skb)->mapping = 0;
dev_kfree_skb(skb);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_pcie.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_protocol.h"
#include "iosm_ipc_protocol_ops.h"
/* Get the next free message element.*/
static union ipc_mem_msg_entry *
ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
{
u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
union ipc_mem_msg_entry *msg;
if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
dev_err(ipc_protocol->dev, "message ring is full");
return NULL;
}
/* Get the pointer to the next free message element,
 * reset the fields and mark it as invalid.
*/
msg = &ipc_protocol->p_ap_shm->msg_ring[head];
memset(msg, 0, sizeof(*msg));
/* return index in message ring */
*index = head;
return msg;
}
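/* For orientation: the message ring is full when advancing the head would
 * land on the tail, so at most IPC_MEM_MSG_ENTRIES - 1 messages can be
 * outstanding at once; head == tail means empty, and
 * (head + 1) % IPC_MEM_MSG_ENTRIES == tail means full.
 */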
/* Updates the message ring Head pointer */
void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
{
struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
/* Update head pointer and fire doorbell. */
ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
ipc_protocol->old_msg_tail =
le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
}
/* Allocate and prepare an OPEN_PIPE message.
* This also allocates the memory for the new TDR structure and
* updates the pipe structure referenced in the preparation arguments.
*/
static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
union ipc_msg_prep_args *args)
{
int index;
union ipc_mem_msg_entry *msg =
ipc_protocol_free_msg_get(ipc_protocol, &index);
struct ipc_pipe *pipe = args->pipe_open.pipe;
struct ipc_protocol_td *tdr;
struct sk_buff **skbr;
if (!msg) {
dev_err(ipc_protocol->dev, "failed to get free message");
return -EIO;
}
	/* Allocate the skb ring that tracks the buffers in flight on this
	 * pipe. The ring is driver-internal memory, so there is no need to
	 * re-calculate start and end addresses for it.
	 */
skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
if (!skbr)
return -ENOMEM;
/* Allocate the transfer descriptors for the pipe. */
tdr = dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
pipe->nr_of_entries * sizeof(*tdr),
&pipe->phy_tdr_start, GFP_ATOMIC);
if (!tdr) {
kfree(skbr);
dev_err(ipc_protocol->dev, "tdr alloc error");
return -ENOMEM;
}
pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
pipe->nr_of_queued_entries = 0;
pipe->tdr_start = tdr;
pipe->skbr_start = skbr;
pipe->old_tail = 0;
ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
msg->open_pipe.pipe_nr = pipe->pipe_nr;
msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
msg->open_pipe.accumulation_backoff =
cpu_to_le32(pipe->accumulation_backoff);
msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);
return index;
}
static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
union ipc_msg_prep_args *args)
{
int index = -1;
union ipc_mem_msg_entry *msg =
ipc_protocol_free_msg_get(ipc_protocol, &index);
struct ipc_pipe *pipe = args->pipe_close.pipe;
if (!msg)
return -EIO;
msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
msg->close_pipe.pipe_nr = pipe->pipe_nr;
dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
msg->close_pipe.pipe_nr);
return index;
}
static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
union ipc_msg_prep_args *args)
{
int index = -1;
union ipc_mem_msg_entry *msg =
ipc_protocol_free_msg_get(ipc_protocol, &index);
if (!msg) {
dev_err(ipc_protocol->dev, "failed to get free message");
return -EIO;
}
/* Prepare and send the host sleep message to CP to enter or exit D3. */
msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */
	/* state: 0=enter, 1=exit, 2=enter w/o protocol */
msg->host_sleep.state = args->sleep.state;
dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
msg->host_sleep.target, msg->host_sleep.state);
return index;
}
static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
union ipc_msg_prep_args *args)
{
int index = -1;
union ipc_mem_msg_entry *msg =
ipc_protocol_free_msg_get(ipc_protocol, &index);
if (!msg) {
dev_err(ipc_protocol->dev, "failed to get free message");
return -EIO;
}
msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
msg->feature_set.reset_enable = args->feature_set.reset_enable <<
RESET_BIT;
dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
msg->feature_set.reset_enable >> RESET_BIT);
return index;
}
/* Processes the message consumed by CP. */
bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
{
struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
bool msg_processed = false;
u32 i;
if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
IPC_MEM_MSG_ENTRIES) {
dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
return msg_processed;
}
if (irq != IMEM_IRQ_DONT_CARE &&
irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
return msg_processed;
for (i = ipc_protocol->old_msg_tail;
i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
union ipc_mem_msg_entry *msg =
&ipc_protocol->p_ap_shm->msg_ring[i];
dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
msg->common.type_of_message,
msg->common.completion_status);
/* Update response with status and wake up waiting requestor */
if (rsp_ring[i]) {
rsp_ring[i]->status =
le32_to_cpu(msg->common.completion_status);
complete(&rsp_ring[i]->completion);
rsp_ring[i] = NULL;
}
msg_processed = true;
}
ipc_protocol->old_msg_tail = i;
return msg_processed;
}
/* Sends data from UL list to CP for the provided pipe by updating the Head
* pointer of given pipe.
*/
bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe,
struct sk_buff_head *p_ul_list)
{
struct ipc_protocol_td *td;
bool hpda_pending = false;
struct sk_buff *skb;
s32 free_elements;
u32 head;
u32 tail;
if (!ipc_protocol->p_ap_shm) {
dev_err(ipc_protocol->dev, "driver is not initialized");
return false;
}
/* Get head and tail of the td list and calculate
* the number of free elements.
*/
head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
tail = pipe->old_tail;
while (!skb_queue_empty(p_ul_list)) {
if (head < tail)
free_elements = tail - head - 1;
else
free_elements =
pipe->nr_of_entries - head + ((s32)tail - 1);
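/* Worked example (illustrative only, not from the original source): with
 * nr_of_entries = 8, head = 6 and tail = 2, the wrap-around branch gives
 * free_elements = 8 - 6 + (2 - 1) = 3, i.e. three more TDs fit into the
 * ring before the head would catch up with the tail.
 */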
if (free_elements <= 0) {
dev_dbg(ipc_protocol->dev,
"no free td elements for UL pipe %d",
pipe->pipe_nr);
break;
}
/* Get the td address. */
td = &pipe->tdr_start[head];
/* Take the first element of the uplink list and add it
* to the td list.
*/
skb = skb_dequeue(p_ul_list);
if (WARN_ON(!skb))
break;
/* Save the reference to the uplink skbuf. */
pipe->skbr_start[head] = skb;
td->buffer.address = IPC_CB(skb)->mapping;
td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
td->next = 0;
pipe->nr_of_queued_entries++;
/* Calculate the new head and save it. */
head++;
if (head >= pipe->nr_of_entries)
head = 0;
ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
cpu_to_le32(head);
}
if (pipe->old_head != head) {
dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);
pipe->old_head = head;
/* Trigger doorbell because of pending UL packets. */
hpda_pending = true;
}
return hpda_pending;
}
/* Checks for Tail pointer update from CP and returns the data as SKB. */
struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];
pipe->nr_of_queued_entries--;
pipe->old_tail++;
if (pipe->old_tail >= pipe->nr_of_entries)
pipe->old_tail = 0;
if (!p_td->buffer.address) {
dev_err(ipc_protocol->dev, "Td buffer address is NULL");
return NULL;
}
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
dev_err(ipc_protocol->dev,
"pipe %d: invalid buf_addr or skb_data",
pipe->pipe_nr);
return NULL;
}
return skb;
}
/* Allocates an SKB for CP to send data and updates the Head Pointer
* of the given Pipe#.
*/
bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
struct ipc_protocol_td *td;
dma_addr_t mapping = 0;
u32 head, new_head;
struct sk_buff *skb;
u32 tail;
/* Get head and tail of the td list and calculate
* the number of free elements.
*/
head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
new_head = head + 1;
if (new_head >= pipe->nr_of_entries)
new_head = 0;
if (new_head == tail)
return false;
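/* Note (added for clarity): one ring slot is intentionally left unused,
 * so new_head == tail can be read as "ring full" while head == tail means
 * "ring empty"; e.g. with nr_of_entries = 4, head = 3 and tail = 0 no
 * further DL buffer is prepared.
 */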
/* Get the td address. */
td = &pipe->tdr_start[head];
/* Allocate the skbuf for the descriptor. */
skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
&mapping, DMA_FROM_DEVICE,
IPC_MEM_DL_ETH_OFFSET);
if (!skb)
return false;
td->buffer.address = mapping;
td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
td->next = 0;
/* store the new head value. */
ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
cpu_to_le32(new_head);
/* Save the reference to the skbuf. */
pipe->skbr_start[head] = skb;
pipe->nr_of_queued_entries++;
return true;
}
/* Processes DL TDs. */
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
struct ipc_protocol_td *p_td;
struct sk_buff *skb;
if (!pipe->tdr_start)
return NULL;
/* Copy the reference to the downlink buffer. */
p_td = &pipe->tdr_start[pipe->old_tail];
skb = pipe->skbr_start[pipe->old_tail];
/* Reset the ring elements. */
pipe->skbr_start[pipe->old_tail] = NULL;
pipe->nr_of_queued_entries--;
pipe->old_tail++;
if (pipe->old_tail >= pipe->nr_of_entries)
pipe->old_tail = 0;
if (!skb) {
dev_err(ipc_protocol->dev, "skb is null");
goto ret;
} else if (!p_td->buffer.address) {
dev_err(ipc_protocol->dev, "td/buffer address is null");
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL;
goto ret;
}
if (p_td->buffer.address != IPC_CB(skb)->mapping) {
dev_err(ipc_protocol->dev, "invalid buf=%llx or skb=%p",
(unsigned long long)p_td->buffer.address, skb->data);
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL;
goto ret;
} else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
le32_to_cpu(p_td->scs) & SIZE_MASK,
pipe->buf_size);
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL;
goto ret;
} else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
IPC_MEM_TD_CS_ABORT) {
/* Discard aborted buffers. */
dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
skb = NULL;
goto ret;
}
/* Set the length field in skbuf. */
skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);
ret:
return skb;
}
void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe, u32 *head,
u32 *tail)
{
struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
if (head)
*head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);
if (tail)
*tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
}
/* Frees the TDs given to CP. */
void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
struct ipc_pipe *pipe)
{
struct sk_buff *skb;
u32 head;
u32 tail;
/* Get the start and the end of the buffer list. */
head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
tail = pipe->old_tail;
/* Reset tail and head to 0. */
ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
/* Free pending uplink and downlink buffers. */
if (pipe->skbr_start) {
while (head != tail) {
/* Get the reference to the in-flight skbuf and free it. */
skb = pipe->skbr_start[tail];
if (skb)
ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
tail++;
if (tail >= pipe->nr_of_entries)
tail = 0;
}
kfree(pipe->skbr_start);
pipe->skbr_start = NULL;
}
pipe->old_tail = 0;
/* Free and reset the td and skbuf circular buffers. kfree is safe! */
if (pipe->tdr_start) {
dma_free_coherent(&ipc_protocol->pcie->pci->dev,
sizeof(*pipe->tdr_start) * pipe->nr_of_entries,
pipe->tdr_start, pipe->phy_tdr_start);
pipe->tdr_start = NULL;
}
}
enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
*ipc_protocol)
{
return (enum ipc_mem_device_ipc_state)
le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
}
enum ipc_mem_exec_stage
ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
{
return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
}
int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
enum ipc_msg_prep_type msg_type,
union ipc_msg_prep_args *args)
{
struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
switch (msg_type) {
case IPC_MSG_PREP_SLEEP:
return ipc_protocol_msg_prep_sleep(ipc_protocol, args);
case IPC_MSG_PREP_PIPE_OPEN:
return ipc_protocol_msg_prepipe_open(ipc_protocol, args);
case IPC_MSG_PREP_PIPE_CLOSE:
return ipc_protocol_msg_prepipe_close(ipc_protocol, args);
case IPC_MSG_PREP_FEATURE_SET:
return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);
/* Unsupported messages in protocol */
case IPC_MSG_PREP_MAP:
case IPC_MSG_PREP_UNMAP:
default:
dev_err(ipc_protocol->dev,
"unsupported message type: %d in protocol", msg_type);
return -EINVAL;
}
}
u32
ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
{
struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_imem.h"
#include "iosm_ipc_protocol.h"
#include "iosm_ipc_protocol_ops.h"
#include "iosm_ipc_pm.h"
#include "iosm_ipc_task_queue.h"
int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
enum ipc_msg_prep_type msg_type,
union ipc_msg_prep_args *prep_args,
struct ipc_rsp *response)
{
int index = ipc_protocol_msg_prep(ipc_protocol->imem, msg_type,
prep_args);
/* Store the reference to the caller-specified response in the response
 * ring and signal CP.
 */
if (index >= 0 && index < IPC_MEM_MSG_ENTRIES) {
ipc_protocol->rsp_ring[index] = response;
ipc_protocol_msg_hp_update(ipc_protocol->imem);
}
return index;
}
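/* Note (added for clarity): the returned ring index doubles as a handle
 * for the caller; ipc_protocol_msg_send() below passes it back via
 * ipc_protocol_tq_msg_remove() to drop the response reference on timeout.
 */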
/* Callback for message send */
static int ipc_protocol_tq_msg_send_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
struct ipc_call_msg_send_args *send_args = msg;
struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
return ipc_protocol_tq_msg_send(ipc_protocol, send_args->msg_type,
send_args->prep_args,
send_args->response);
}
/* Remove reference to a response. This is typically used when a requestor timed
* out and is no longer interested in the response.
*/
static int ipc_protocol_tq_msg_remove(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
ipc_protocol->rsp_ring[arg] = NULL;
return 0;
}
int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
enum ipc_msg_prep_type prep,
union ipc_msg_prep_args *prep_args)
{
struct ipc_call_msg_send_args send_args;
unsigned int exec_timeout;
struct ipc_rsp response;
int index;
exec_timeout = (ipc_protocol_get_ap_exec_stage(ipc_protocol) ==
IPC_MEM_EXEC_STAGE_RUN ?
IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT :
IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT);
/* Trap if called from non-preemptible context */
might_sleep();
response.status = IPC_MEM_MSG_CS_INVALID;
init_completion(&response.completion);
send_args.msg_type = prep;
send_args.prep_args = prep_args;
send_args.response = &response;
/* Allocate and prepare message to be sent in tasklet context.
* A positive index returned from the tasklet call references the message
* in case it needs to be cancelled when there is a timeout.
*/
index = ipc_task_queue_send_task(ipc_protocol->imem,
ipc_protocol_tq_msg_send_cb, 0,
&send_args, 0, true);
if (index < 0) {
dev_err(ipc_protocol->dev, "msg %d failed", prep);
return index;
}
/* Wait for the device to respond to the message */
switch (wait_for_completion_timeout(&response.completion,
msecs_to_jiffies(exec_timeout))) {
case 0:
/* Timeout, there was no response from the device.
* Remove the reference to the local response completion
* object as we are no longer interested in the response.
*/
ipc_task_queue_send_task(ipc_protocol->imem,
ipc_protocol_tq_msg_remove, index,
NULL, 0, true);
dev_err(ipc_protocol->dev, "msg timeout");
ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
break;
default:
/* We got a response in time; check completion status: */
if (response.status != IPC_MEM_MSG_CS_SUCCESS) {
dev_err(ipc_protocol->dev,
"msg completion status error %d",
response.status);
return -EIO;
}
}
return 0;
}
static int ipc_protocol_msg_send_host_sleep(struct iosm_protocol *ipc_protocol,
u32 state)
{
union ipc_msg_prep_args prep_args = {
.sleep.target = 0,
.sleep.state = state,
};
return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_SLEEP,
&prep_args);
}
void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
u32 identifier)
{
ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, identifier, true);
}
bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol)
{
u32 ipc_status = ipc_protocol_get_ipc_status(ipc_protocol);
u32 requested;
if (ipc_status != IPC_MEM_DEVICE_IPC_RUNNING) {
dev_err(ipc_protocol->dev,
"irq ignored, CP IPC state is %d, should be RUNNING",
ipc_status);
/* Stop further processing. */
return false;
}
/* Get a copy of the PM state requested by the device and feed it into
 * the local device PM state handling.
 */
requested = ipc_protocol_pm_dev_get_sleep_notification(ipc_protocol);
return ipc_pm_dev_slp_notification(&ipc_protocol->pm, requested);
}
static int ipc_protocol_tq_wakeup_dev_slp(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
struct iosm_pm *ipc_pm = &ipc_imem->ipc_protocol->pm;
/* Wakeup from device sleep if it is not ACTIVE */
ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, true);
ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, false);
return 0;
}
void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep)
{
ipc_pm_set_s2idle_sleep(&ipc_protocol->pm, sleep);
}
bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol)
{
if (!ipc_pm_prepare_host_sleep(&ipc_protocol->pm))
goto err;
ipc_task_queue_send_task(ipc_protocol->imem,
ipc_protocol_tq_wakeup_dev_slp, 0, NULL, 0,
true);
if (!ipc_pm_wait_for_device_active(&ipc_protocol->pm)) {
ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
goto err;
}
/* Send the sleep message for sync sys calls. */
dev_dbg(ipc_protocol->dev, "send TARGET_HOST, ENTER_SLEEP");
if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
IPC_HOST_SLEEP_ENTER_SLEEP)) {
/* Sending ENTER_SLEEP message failed, we are still active */
ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
goto err;
}
ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
return true;
err:
return false;
}
bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol)
{
if (!ipc_pm_prepare_host_active(&ipc_protocol->pm))
return false;
dev_dbg(ipc_protocol->dev, "send TARGET_HOST, EXIT_SLEEP");
if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
IPC_HOST_SLEEP_EXIT_SLEEP)) {
ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
return false;
}
ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
return true;
}
struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
{
struct iosm_protocol *ipc_protocol =
kzalloc(sizeof(*ipc_protocol), GFP_KERNEL);
struct ipc_protocol_context_info *p_ci;
u64 addr;
if (!ipc_protocol)
return NULL;
ipc_protocol->dev = ipc_imem->dev;
ipc_protocol->pcie = ipc_imem->pcie;
ipc_protocol->imem = ipc_imem;
ipc_protocol->p_ap_shm = NULL;
ipc_protocol->phy_ap_shm = 0;
ipc_protocol->old_msg_tail = 0;
ipc_protocol->p_ap_shm =
dma_alloc_coherent(&ipc_protocol->pcie->pci->dev,
sizeof(*ipc_protocol->p_ap_shm),
&ipc_protocol->phy_ap_shm, GFP_KERNEL);
if (!ipc_protocol->p_ap_shm) {
dev_err(ipc_protocol->dev, "pci shm alloc error");
kfree(ipc_protocol);
return NULL;
}
/* Prepare the context info for CP. */
addr = ipc_protocol->phy_ap_shm;
p_ci = &ipc_protocol->p_ap_shm->ci;
p_ci->device_info_addr =
addr + offsetof(struct ipc_protocol_ap_shm, device_info);
p_ci->head_array =
addr + offsetof(struct ipc_protocol_ap_shm, head_array);
p_ci->tail_array =
addr + offsetof(struct ipc_protocol_ap_shm, tail_array);
p_ci->msg_head = addr + offsetof(struct ipc_protocol_ap_shm, msg_head);
p_ci->msg_tail = addr + offsetof(struct ipc_protocol_ap_shm, msg_tail);
p_ci->msg_ring_addr =
addr + offsetof(struct ipc_protocol_ap_shm, msg_ring);
p_ci->msg_ring_entries = cpu_to_le16(IPC_MEM_MSG_ENTRIES);
p_ci->msg_irq_vector = IPC_MSG_IRQ_VECTOR;
p_ci->device_info_irq_vector = IPC_DEVICE_IRQ_VECTOR;
ipc_mmio_set_contex_info_addr(ipc_imem->mmio, addr);
ipc_pm_init(ipc_protocol);
return ipc_protocol;
}
void ipc_protocol_deinit(struct iosm_protocol *proto)
{
dma_free_coherent(&proto->pcie->pci->dev, sizeof(*proto->p_ap_shm),
proto->p_ap_shm, proto->phy_ap_shm);
ipc_pm_deinit(proto);
kfree(proto);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_protocol.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_imem.h"
#include "iosm_ipc_task_queue.h"
/* Actual tasklet function, called whenever the tasklet is scheduled.
 * Invokes the event-handler callback for each element in the message queue.
 */
static void ipc_task_queue_handler(unsigned long data)
{
struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
unsigned int q_rpos = ipc_task->q_rpos;
/* Loop over the input queue contents. */
while (q_rpos != ipc_task->q_wpos) {
/* Get the current first queue element. */
struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
/* Process the input message. */
if (args->func)
args->response = args->func(args->ipc_imem, args->arg,
args->msg, args->size);
/* Signal completion for synchronous calls */
if (args->completion)
complete(args->completion);
/* Free message if copy was allocated. */
if (args->is_copy)
kfree(args->msg);
/* Invalidate the queue element. Technically, spin_lock_irqsave is not
 * required here: the array element has already been processed, and the
 * queue will not wrap around to this same element again within such a
 * short time.
 */
args->completion = NULL;
args->func = NULL;
args->msg = NULL;
args->size = 0;
args->is_copy = false;
/* calculate the new read ptr and update the volatile read
* ptr
*/
q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
ipc_task->q_rpos = q_rpos;
}
}
/* Free allocated memory and trigger completions for entries still left in
 * the queue during deallocation.
 */
static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
{
unsigned int q_rpos = ipc_task->q_rpos;
while (q_rpos != ipc_task->q_wpos) {
struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
if (args->completion)
complete(args->completion);
if (args->is_copy)
kfree(args->msg);
q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
ipc_task->q_rpos = q_rpos;
}
}
/* Add a message to the queue and trigger the ipc_task. */
static int
ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
int arg, void *msg,
int (*func)(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size),
size_t size, bool is_copy, bool wait)
{
struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
struct completion completion;
unsigned int pos, nextpos;
unsigned long flags;
int result = -EIO;
init_completion(&completion);
/* The tasklet send may be called from either interrupt or thread
 * context, therefore protect the queue operation with a spinlock.
 */
spin_lock_irqsave(&ipc_task->q_lock, flags);
pos = ipc_task->q_wpos;
nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
/* Get next queue position. */
if (nextpos != ipc_task->q_rpos) {
/* Get the reference to the queue element and save the passed
* values.
*/
ipc_task->args[pos].arg = arg;
ipc_task->args[pos].msg = msg;
ipc_task->args[pos].func = func;
ipc_task->args[pos].ipc_imem = ipc_imem;
ipc_task->args[pos].size = size;
ipc_task->args[pos].is_copy = is_copy;
ipc_task->args[pos].completion = wait ? &completion : NULL;
ipc_task->args[pos].response = -1;
/* Apply a write barrier so that the queue element (ipc_task->args[pos])
 * is fully written before ipc_task->q_wpos is updated.
 */
smp_wmb();
/* Update the status of the free queue space. */
ipc_task->q_wpos = nextpos;
result = 0;
}
spin_unlock_irqrestore(&ipc_task->q_lock, flags);
if (result == 0) {
tasklet_schedule(ipc_tasklet);
if (wait) {
wait_for_completion(&completion);
result = ipc_task->args[pos].response;
}
} else {
dev_err(ipc_imem->ipc_task->dev, "queue is full");
}
return result;
}
int ipc_task_queue_send_task(struct iosm_imem *imem,
int (*func)(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size),
int arg, void *msg, size_t size, bool wait)
{
bool is_copy = false;
void *copy = msg;
int ret = -ENOMEM;
if (size > 0) {
copy = kmemdup(msg, size, GFP_ATOMIC);
if (!copy)
goto out;
is_copy = true;
}
ret = ipc_task_queue_add_task(imem, arg, copy, func,
size, is_copy, wait);
if (ret < 0) {
dev_err(imem->ipc_task->dev,
"add task failed for %ps %d, %p, %zu, %d", func, arg,
copy, size, is_copy);
if (is_copy)
kfree(copy);
goto out;
}
ret = 0;
out:
return ret;
}
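/* Usage sketch (illustrative, mirrors ipc_protocol_msg_send() elsewhere in
 * this driver):
 *
 *	struct ipc_call_msg_send_args send_args = { ... };
 *	int index = ipc_task_queue_send_task(ipc_imem,
 *					     ipc_protocol_tq_msg_send_cb, 0,
 *					     &send_args, 0, true);
 *
 * With size == 0 the msg pointer is passed through unchanged and must stay
 * valid until the callback has run (wait == true guarantees that here);
 * with size > 0 the payload is duplicated with kmemdup() and freed by the
 * tasklet handler once the callback returns.
 */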
int ipc_task_init(struct ipc_task *ipc_task)
{
struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
GFP_KERNEL);
if (!ipc_task->ipc_tasklet)
return -ENOMEM;
/* Initialize the spinlock needed to protect the message queue of the
* ipc_task
*/
spin_lock_init(&ipc_queue->q_lock);
tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
(unsigned long)ipc_queue);
return 0;
}
void ipc_task_deinit(struct ipc_task *ipc_task)
{
tasklet_kill(ipc_task->ipc_tasklet);
kfree(ipc_task->ipc_tasklet);
/* This will free/complete any outstanding messages,
* without calling the actual handler
*/
ipc_task_queue_cleanup(&ipc_task->ipc_queue);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_task_queue.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/wwan.h>
#include "iosm_ipc_chnl_cfg.h"
/* Max. sizes of downlink buffers */
#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024)
#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
#define IPC_MEM_MAX_DL_MBIM_BUF_SIZE IPC_MEM_MAX_DL_RPC_BUF_SIZE
/* Max. transfer descriptors for a pipe. */
#define IPC_MEM_MAX_TDS_FLASH_DL 3
#define IPC_MEM_MAX_TDS_FLASH_UL 6
#define IPC_MEM_MAX_TDS_AT 4
#define IPC_MEM_MAX_TDS_RPC 4
#define IPC_MEM_MAX_TDS_MBIM IPC_MEM_MAX_TDS_RPC
#define IPC_MEM_MAX_TDS_LOOPBACK 11
/* Accumulation backoff usec */
#define IRQ_ACC_BACKOFF_OFF 0
/* MUX acc backoff 1ms */
#define IRQ_ACC_BACKOFF_MUX 1000
/* Modem channel configuration table
* Always reserve element zero for flash channel.
*/
static struct ipc_chnl_cfg modem_cfg[] = {
/* IP Mux */
{ IPC_MEM_IP_CHL_ID_0, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
IPC_MEM_MAX_TDS_MUX_LITE_UL, IPC_MEM_MAX_TDS_MUX_LITE_DL,
IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE, WWAN_PORT_UNKNOWN },
/* RPC - 0 */
{ IPC_MEM_CTRL_CHL_ID_1, IPC_MEM_PIPE_2, IPC_MEM_PIPE_3,
IPC_MEM_MAX_TDS_RPC, IPC_MEM_MAX_TDS_RPC,
IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_XMMRPC },
/* IAT0 */
{ IPC_MEM_CTRL_CHL_ID_2, IPC_MEM_PIPE_4, IPC_MEM_PIPE_5,
IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
WWAN_PORT_AT },
/* Trace */
{ IPC_MEM_CTRL_CHL_ID_3, IPC_MEM_PIPE_6, IPC_MEM_PIPE_7,
IPC_MEM_TDS_TRC, IPC_MEM_TDS_TRC, IPC_MEM_MAX_DL_TRC_BUF_SIZE,
WWAN_PORT_UNKNOWN },
/* IAT1 */
{ IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_PIPE_8, IPC_MEM_PIPE_9,
IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
WWAN_PORT_AT },
/* Loopback */
{ IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_PIPE_10, IPC_MEM_PIPE_11,
IPC_MEM_MAX_TDS_LOOPBACK, IPC_MEM_MAX_TDS_LOOPBACK,
IPC_MEM_MAX_DL_LOOPBACK_SIZE, WWAN_PORT_UNKNOWN },
/* MBIM Channel */
{ IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
/* Flash Channel/Coredump Channel */
{ IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL,
IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN },
};
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
{
if (index >= ARRAY_SIZE(modem_cfg)) {
pr_err("index: %d and array size %zu", index,
ARRAY_SIZE(modem_cfg));
return -ECHRNG;
}
if (index == IPC_MEM_MUX_IP_CH_IF_ID)
chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_MUX;
else
chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_OFF;
chnl_cfg->ul_nr_of_entries = modem_cfg[index].ul_nr_of_entries;
chnl_cfg->dl_nr_of_entries = modem_cfg[index].dl_nr_of_entries;
chnl_cfg->dl_buf_size = modem_cfg[index].dl_buf_size;
chnl_cfg->id = modem_cfg[index].id;
chnl_cfg->ul_pipe = modem_cfg[index].ul_pipe;
chnl_cfg->dl_pipe = modem_cfg[index].dl_pipe;
chnl_cfg->wwan_port_type = modem_cfg[index].wwan_port_type;
return 0;
}
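/* Usage sketch (illustrative only): ipc_imem_run_state_worker() walks this
 * table roughly as follows,
 *
 *	struct ipc_chnl_cfg cfg = { 0 };
 *	int i;
 *
 *	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++)
 *		if (!ipc_chnl_cfg_get(&cfg, i))
 *			// set up the channel/port described by cfg
 *
 * an out-of-range index is rejected with -ECHRNG.
 */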
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include "iosm_ipc_coredump.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
/* This function will pack the data to be sent to the modem using the
* payload, payload length and pack id
*/
static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req,
u32 pack_length, u16 pack_id,
u8 *payload, u32 payload_length)
{
u16 checksum = pack_id;
u32 i;
if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length)
return -EINVAL;
flash_req->pack_id = cpu_to_le16(pack_id);
flash_req->msg_length = cpu_to_le32(payload_length);
checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) +
(payload_length & IOSM_EBL_CKSM);
for (i = 0; i < payload_length; i++)
checksum += payload[i];
flash_req->checksum = cpu_to_le16(checksum);
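/* Worked example (hypothetical numbers): for pack_id = 2 and a three byte
 * payload { 0x01, 0x02, 0x03 } with payload_length = 3, the checksum is
 * 2 + (3 >> IOSM_EBL_PAYL_SHIFT) + (3 & IOSM_EBL_CKSM) + 0x01 + 0x02 + 0x03,
 * i.e. the pack id plus the folded length plus the byte sum of the payload.
 */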
return 0;
}
/* Validate the response received from the modem and check the type of
 * errors received.
 */
static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp)
{
struct iosm_ebl_error *err_info = payload_rsp;
u16 *rsp_code = hdr_rsp;
u32 i;
if (*rsp_code == IOSM_EBL_RSP_BUFF) {
for (i = 0; i < IOSM_MAX_ERRORS; i++) {
if (!err_info->error[i].error_code) {
pr_err("EBL: error_class = %d, error_code = %d",
err_info->error[i].error_class,
err_info->error[i].error_code);
}
}
return -EINVAL;
}
return 0;
}
/* Send data to the modem */
static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size,
u16 pack_id, u8 *payload, u32 payload_length)
{
struct iosm_flash_data flash_req;
int ret;
ret = ipc_flash_proc_format_ebl_pack(&flash_req, size,
pack_id, payload, payload_length);
if (ret) {
dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d",
pack_id);
goto ipc_free_payload;
}
ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req,
IOSM_EBL_HEAD_SIZE);
if (ret) {
dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x",
pack_id);
goto ipc_free_payload;
}
ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length);
if (ret) {
dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x",
pack_id);
}
ipc_free_payload:
return ret;
}
/**
* ipc_flash_link_establish - Flash link establishment
* @ipc_imem: Pointer to struct iosm_imem
*
* Returns: 0 on success and failure value on error
*/
int ipc_flash_link_establish(struct iosm_imem *ipc_imem)
{
u8 ler_data[IOSM_LER_RSP_SIZE];
u32 bytes_read;
/* Allocate channel for flashing/cd collection */
ipc_imem->ipc_devlink->devlink_sio.channel =
ipc_imem_sys_devlink_open(ipc_imem);
if (!ipc_imem->ipc_devlink->devlink_sio.channel)
goto chl_open_fail;
if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data,
IOSM_LER_RSP_SIZE, &bytes_read))
goto devlink_read_fail;
if (bytes_read != IOSM_LER_RSP_SIZE)
goto devlink_read_fail;
return 0;
devlink_read_fail:
ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink);
chl_open_fail:
return -EIO;
}
/* Receive data from the modem */
static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size,
u8 *mdm_rsp)
{
u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE];
u32 bytes_read;
int ret;
ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr,
IOSM_EBL_HEAD_SIZE, &bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
IOSM_EBL_HEAD_SIZE);
goto ipc_flash_recv_err;
}
if (bytes_read != IOSM_EBL_HEAD_SIZE) {
ret = -EINVAL;
goto ipc_flash_recv_err;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size,
&bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
size);
goto ipc_flash_recv_err;
}
if (bytes_read != size) {
ret = -EINVAL;
goto ipc_flash_recv_err;
}
ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp);
ipc_flash_recv_err:
return ret;
}
/* Function to send command to modem and receive response */
static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id,
u8 *payload, u32 payload_length, u8 *mdm_rsp)
{
size_t frame_len = IOSM_EBL_DW_PACK_SIZE;
int ret;
if (pack_id == FLASH_SET_PROT_CONF)
frame_len = IOSM_EBL_W_PACK_SIZE;
ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload,
payload_length);
if (ret)
goto ipc_flash_send_rcv;
ret = ipc_flash_receive_data(ipc_devlink,
frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp);
ipc_flash_send_rcv:
return ret;
}
/**
* ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash
* @ipc_devlink: Pointer to devlink structure
* @mdm_rsp: Pointer to modem response buffer
*
* Returns: 0 on success and failure value on error
*/
int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
u8 *mdm_rsp)
{
ipc_devlink->ebl_ctx.ebl_sw_info_version =
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER];
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED;
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED;
if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] &
IOSM_CAP_USE_EXT_CAP) {
if (ipc_devlink->param.erase_full_flash)
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
~((u8)IOSM_EXT_CAP_ERASE_ALL);
else
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
~((u8)IOSM_EXT_CAP_COMMIT_ALL);
ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] =
IOSM_CAP_USE_EXT_CAP;
}
/* Write the EBL capabilities back to the modem with a Set Protconf
 * (FLASH_SET_PROT_CONF) request.
 */
return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF,
ipc_devlink->ebl_ctx.m_ebl_resp,
IOSM_EBL_RSP_SIZE, mdm_rsp);
}
/* Read the SWID type and SWID value from the EBL */
int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
{
struct iosm_flash_msg_control cmd_msg;
struct iosm_swid_table *swid;
char ebl_swid[IOSM_SWID_STR];
int ret;
if (ipc_devlink->ebl_ctx.ebl_sw_info_version !=
IOSM_EXT_CAP_SWID_OOS_PACK)
return -EINVAL;
cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ);
cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE);
cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG);
cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG);
ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
(u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp);
if (ret)
goto ipc_swid_err;
cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp));
ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ,
(u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp);
if (ret)
goto ipc_swid_err;
swid = (struct iosm_swid_table *)mdm_rsp;
dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val,
swid->rf_engine_id_val);
snprintf(ebl_swid, sizeof(ebl_swid), "SWID: %x, RF_ENGINE_ID: %x",
swid->sw_id_val, swid->rf_engine_id_val);
devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid,
NULL, 0, 0);
ipc_swid_err:
return ret;
}
/* Function to check if full erase or conditional erase was successful */
static int ipc_flash_erase_check(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
{
int ret, count = 0;
u16 mdm_rsp_data;
/* Request Flash Erase Check */
do {
mdm_rsp_data = IOSM_MDM_SEND_DATA;
ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK,
(u8 *)&mdm_rsp_data,
IOSM_MDM_SEND_2, mdm_rsp);
if (ret)
goto ipc_erase_chk_err;
mdm_rsp_data = *((u16 *)mdm_rsp);
if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) {
dev_err(ipc_devlink->dev,
"Flash Erase Check resp wrong 0x%04X",
mdm_rsp_data);
ret = -EINVAL;
goto ipc_erase_chk_err;
}
count++;
msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL);
} while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) &&
(count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT /
IOSM_FLASH_ERASE_CHECK_INTERVAL)));
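/* Illustrative bound (hypothetical macro values): if the check interval
 * were 100 ms and the overall timeout 20 s, the loop above would poll the
 * modem at most 200 times before reporting -ETIMEDOUT.
 */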
if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) {
dev_err(ipc_devlink->dev, "Modem erase check timeout failure!");
ret = -ETIMEDOUT;
}
ipc_erase_chk_err:
return ret;
}
/* Full erase function which erases the NAND flash through an EBL command */
static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
{
u32 erase_address = IOSM_ERASE_START_ADDR;
struct iosm_flash_msg_control cmd_msg;
u32 erase_length = IOSM_ERASE_LEN;
int ret;
dev_dbg(ipc_devlink->dev, "Erase full nand flash");
cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE);
cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH);
cmd_msg.length = cpu_to_le32(erase_length);
cmd_msg.arguments = cpu_to_le32(erase_address);
ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
(unsigned char *)&cmd_msg,
IOSM_MDM_SEND_16, mdm_rsp);
if (ret)
goto ipc_flash_erase_err;
ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG;
ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
ipc_flash_erase_err:
return ret;
}
/* Flash all the loadmaps available in an individual fls file */
static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
const struct firmware *fw, u8 *mdm_rsp)
{
u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE;
struct iosm_devlink_image *fls_data;
__le32 reg_info[2]; /* 0th position region address, 1st position size */
u32 nand_address;
char *file_ptr;
int ret;
fls_data = (struct iosm_devlink_image *)fw->data;
file_ptr = (void *)(fls_data + 1);
nand_address = le32_to_cpu(fls_data->region_address);
reg_info[0] = cpu_to_le32(nand_address);
if (!ipc_devlink->param.erase_full_flash_done) {
reg_info[1] = cpu_to_le32(nand_address + rest_len - 2);
ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START,
(u8 *)reg_info, IOSM_MDM_SEND_8,
mdm_rsp);
if (ret)
goto dl_region_fail;
ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
if (ret)
goto dl_region_fail;
}
/* Request Flash Set Address */
ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS,
(u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp);
if (ret)
goto dl_region_fail;
/* Request Flash Write Raw Image */
ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE,
FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len,
IOSM_MDM_SEND_4);
if (ret)
goto dl_region_fail;
do {
raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? IOSM_FLS_BUF_SIZE :
rest_len;
ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr,
raw_len);
if (ret) {
dev_err(ipc_devlink->dev, "Image write failed");
goto dl_region_fail;
}
file_ptr += raw_len;
rest_len -= raw_len;
} while (rest_len);
ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE,
mdm_rsp);
dl_region_fail:
return ret;
}
/**
* ipc_flash_send_fls - Inject Modem subsystem fls file to device
* @ipc_devlink: Pointer to devlink structure
* @fw: FW image
* @mdm_rsp: Pointer to modem response buffer
*
* Returns: 0 on success and failure value on error
*/
int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
const struct firmware *fw, u8 *mdm_rsp)
{
u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
struct iosm_devlink_image *fls_data;
u16 flash_cmd;
int ret;
fls_data = (struct iosm_devlink_image *)fw->data;
if (ipc_devlink->param.erase_full_flash) {
ipc_devlink->param.erase_full_flash = false;
ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp);
if (ret)
goto ipc_flash_err;
}
/* Request Sec Start */
if (!fls_data->download_region) {
ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START,
(u8 *)fw->data +
IOSM_DEVLINK_HDR_SIZE, fw_size,
mdm_rsp);
if (ret)
goto ipc_flash_err;
} else {
/* Download regions */
ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp);
if (ret)
goto ipc_flash_err;
if (fls_data->last_region) {
/* Request Sec End */
flash_cmd = IOSM_MDM_SEND_DATA;
ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END,
(u8 *)&flash_cmd,
IOSM_MDM_SEND_2, mdm_rsp);
}
}
ipc_flash_err:
return ret;
}
/**
* ipc_flash_boot_psi - Inject PSI image
* @ipc_devlink: Pointer to devlink structure
* @fw: FW image
*
* Returns: 0 on success and failure value on error
*/
int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
const struct firmware *fw)
{
u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2];
u8 *psi_code;
int ret;
dev_dbg(ipc_devlink->dev, "Boot transfer PSI");
psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size,
GFP_KERNEL);
if (!psi_code)
return -ENOMEM;
ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size);
if (ret) {
dev_err(ipc_devlink->dev, "RPSI Image write failed");
goto ipc_flash_psi_free;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data,
IOSM_LER_ACK_SIZE, &bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed");
goto ipc_flash_psi_free;
}
if (bytes_read != IOSM_LER_ACK_SIZE) {
ret = -EINVAL;
goto ipc_flash_psi_free;
}
snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0],
read_data[1]);
devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
psi_ack_byte, "PSI ACK", 0, 0);
if (read_data[0] == 0x00 && read_data[1] == 0xCD) {
dev_dbg(ipc_devlink->dev, "Coredump detected");
ret = ipc_coredump_get_list(ipc_devlink,
rpsi_cmd_coredump_start);
if (ret)
dev_err(ipc_devlink->dev, "Failed to get cd list");
}
ipc_flash_psi_free:
kfree(psi_code);
return ret;
}
/**
* ipc_flash_boot_ebl - Inject EBL image
* @ipc_devlink: Pointer to devlink structure
* @fw: FW image
*
* Returns: 0 on success and failure value on error
*/
int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
const struct firmware *fw)
{
u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
u8 read_data[2];
u32 bytes_read;
int ret;
if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) !=
IPC_MEM_EXEC_STAGE_PSI) {
devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
"Invalid execution stage",
NULL, 0, 0);
return -EINVAL;
}
dev_dbg(ipc_devlink->dev, "Boot transfer EBL");
ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl,
IOSM_RPSI_LOAD_SIZE);
if (ret) {
dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed");
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
&bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed");
goto ipc_flash_ebl_err;
}
if (bytes_read != IOSM_READ_SIZE) {
ret = -EINVAL;
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size,
sizeof(ebl_size));
if (ret) {
dev_err(ipc_devlink->dev, "EBL length write failed");
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
&bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "EBL read failed");
goto ipc_flash_ebl_err;
}
if (bytes_read != IOSM_READ_SIZE) {
ret = -EINVAL;
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_write(ipc_devlink,
(u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE,
ebl_size);
if (ret) {
dev_err(ipc_devlink->dev, "EBL data transfer failed");
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
&bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "EBL read failed");
goto ipc_flash_ebl_err;
}
if (bytes_read != IOSM_READ_SIZE) {
ret = -EINVAL;
goto ipc_flash_ebl_err;
}
ret = ipc_imem_sys_devlink_read(ipc_devlink,
ipc_devlink->ebl_ctx.m_ebl_resp,
IOSM_EBL_RSP_SIZE, &bytes_read);
if (ret) {
dev_err(ipc_devlink->dev, "EBL response read failed");
goto ipc_flash_ebl_err;
}
if (bytes_read != IOSM_EBL_RSP_SIZE)
ret = -EINVAL;
ipc_flash_ebl_err:
return ret;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_flash.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"
/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
if (chnl)
return chnl->ctype == IPC_CTYPE_WWAN &&
chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
return false;
}
static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
union ipc_msg_prep_args prep_args = {
.sleep.target = 1,
.sleep.state = state,
};
ipc_imem->device_sleep = state;
return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}
static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
struct ipc_pipe *pipe)
{
/* limit max. nr of entries */
if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
return false;
return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}
/* This timer handler retries DL buffer allocation if a pipe has no free
 * buffers and rings the doorbell if new TDs are available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
bool new_buffers_available = false;
bool retry_allocation = false;
int i;
for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
continue;
while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
new_buffers_available = true;
if (pipe->nr_of_queued_entries == 0)
retry_allocation = true;
}
if (new_buffers_available)
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_DL_PROCESS);
if (retry_allocation) {
ipc_imem->hrtimer_period =
ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
if (!hrtimer_active(&ipc_imem->td_alloc_timer))
hrtimer_start(&ipc_imem->td_alloc_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
}
return 0;
}
static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
struct iosm_imem *ipc_imem =
container_of(hr_timer, struct iosm_imem, td_alloc_timer);
/* Post an async tasklet event to trigger HP update Doorbell */
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
0, false);
return HRTIMER_NORESTART;
}
/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_FAST_TD_UPD_TMR);
return 0;
}
static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
struct iosm_imem *ipc_imem =
container_of(hr_timer, struct iosm_imem, fast_update_timer);
/* Post an async tasklet event to trigger HP update Doorbell */
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
NULL, 0, false);
return HRTIMER_NORESTART;
}
static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
ipc_mux_ul_adb_finish(ipc_imem->mux);
return 0;
}
static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
struct iosm_imem *ipc_imem =
container_of(hr_timer, struct iosm_imem, adb_timer);
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
NULL, 0, false);
return HRTIMER_NORESTART;
}
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
struct ipc_mux_config *cfg)
{
ipc_mmio_update_cp_capability(ipc_imem->mmio);
if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
dev_err(ipc_imem->dev, "Failed to get Mux capability.");
return -EINVAL;
}
cfg->protocol = ipc_imem->mmio->mux_protocol;
cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
MUX_UL_ON_CREDITS :
MUX_UL;
/* The instance ID is the same as the channel ID because it is reused
 * by the channel alloc function.
 */
cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
return 0;
}
void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
unsigned int reset_enable, bool atomic_ctx)
{
union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
reset_enable };
if (atomic_ctx)
ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
IPC_MSG_PREP_FEATURE_SET, &prep_args,
NULL);
else
ipc_protocol_msg_send(ipc_imem->ipc_protocol,
IPC_MSG_PREP_FEATURE_SET, &prep_args);
}
/**
* ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
* @ipc_imem: Pointer to imem data-struct
*/
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
/* Use the TD update timer only in the runtime phase */
if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
/* trigger the doorbell irq on CP directly. */
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_TD_UPD_TMR_START);
return;
}
if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
ipc_imem->hrtimer_period =
ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
if (!hrtimer_active(&ipc_imem->tdupdate_timer))
hrtimer_start(&ipc_imem->tdupdate_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
}
}
void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
if (hrtimer_active(hr_timer))
hrtimer_cancel(hr_timer);
}
/**
* ipc_imem_adb_timer_start - Starts the ADB timer if not already started.
* @ipc_imem: Pointer to imem data-struct
*/
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
if (!hrtimer_active(&ipc_imem->adb_timer)) {
ipc_imem->hrtimer_period =
ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
hrtimer_start(&ipc_imem->adb_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
}
}
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
bool hpda_ctrl_pending = false;
struct sk_buff_head *ul_list;
bool hpda_pending = false;
struct ipc_pipe *pipe;
int i;
/* Analyze the uplink pipe of all active channels. */
for (i = 0; i < ipc_imem->nr_of_channels; i++) {
channel = &ipc_imem->channels[i];
if (channel->state != IMEM_CHANNEL_ACTIVE)
continue;
pipe = &channel->ul_pipe;
/* Get the reference to the skbuf accumulator list. */
ul_list = &channel->ul_list;
/* Fill the transfer descriptor with the uplink buffer info. */
if (!ipc_imem_check_wwan_ips(channel)) {
hpda_ctrl_pending |=
ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
pipe, ul_list);
} else {
hpda_pending |=
ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
pipe, ul_list);
}
}
/* forced HP update needed for non-data channels */
if (hpda_ctrl_pending) {
hpda_pending = false;
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_UL_WRITE_TD);
}
return hpda_pending;
}
void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
int timeout = IPC_MODEM_BOOT_TIMEOUT;
ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
/* Trigger the CP interrupt to enter the init state. */
ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
IPC_MEM_DEVICE_IPC_INIT);
/* Wait for the CP update. */
do {
if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
ipc_imem->ipc_requested_state) {
/* Prepare the MMIO space */
ipc_mmio_config(ipc_imem->mmio);
/* Trigger the CP irq to enter the running state. */
ipc_imem->ipc_requested_state =
IPC_MEM_DEVICE_IPC_RUNNING;
ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
IPC_MEM_DEVICE_IPC_RUNNING);
return;
}
msleep(20);
} while (--timeout);
/* timeout */
dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
ipc_imem_phase_get_string(ipc_imem->phase),
ipc_mmio_get_ipc_state(ipc_imem->mmio));
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}
/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
struct ipc_pipe *pipe, struct sk_buff *skb)
{
u16 port_id;
if (!skb)
return;
/* An AT/control or IP packet is expected. */
switch (pipe->channel->ctype) {
case IPC_CTYPE_CTRL:
port_id = pipe->channel->channel_id;
ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
IPC_CB(skb)->mapping,
IPC_CB(skb)->direction);
if (port_id == IPC_MEM_CTRL_CHL_ID_7)
ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
skb);
else if (ipc_is_trace_channel(ipc_imem, port_id))
ipc_trace_port_rx(ipc_imem, skb);
else
wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
skb);
break;
case IPC_CTYPE_WWAN:
if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
ipc_mux_dl_decode(ipc_imem->mux, skb);
break;
default:
dev_err(ipc_imem->dev, "Invalid channel type");
break;
}
}
/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
struct ipc_pipe *pipe)
{
s32 cnt = 0, processed_td_cnt = 0;
struct ipc_mem_channel *channel;
u32 head = 0, tail = 0;
bool processed = false;
struct sk_buff *skb;
channel = pipe->channel;
ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
&tail);
if (pipe->old_tail != tail) {
if (pipe->old_tail < tail)
cnt = tail - pipe->old_tail;
else
cnt = pipe->nr_of_entries - pipe->old_tail + tail;
}
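/* Example (illustrative only): with nr_of_entries = 8, old_tail = 6 and a
 * new tail of 2, the wrap-around branch yields cnt = 8 - 6 + 2 = 4 TDs to
 * process below.
 */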
processed_td_cnt = cnt;
/* Seek for pipes with pending DL data. */
while (cnt--) {
skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
/* Analyze the packet type and distribute it. */
ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
}
/* Try to allocate new empty DL SKBs from head..tail - 1. */
while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
processed = true;
if (processed && !ipc_imem_check_wwan_ips(channel)) {
/* Force HP update for non IP channels */
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_DL_PROCESS);
processed = false;
/* If Fast Update timer is already running then stop */
ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
}
/* Any control channel process will get immediate HP update.
* Start Fast update timer only for IP channel if all the TDs were
* used in last process.
*/
if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
ipc_imem->hrtimer_period =
ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
hrtimer_start(&ipc_imem->fast_update_timer,
ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
}
if (ipc_imem->app_notify_dl_pend)
complete(&ipc_imem->dl_pend_sem);
}
/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
struct ipc_pipe *pipe)
{
struct ipc_mem_channel *channel;
u32 tail = 0, head = 0;
struct sk_buff *skb;
s32 cnt = 0;
channel = pipe->channel;
/* Get the pipe head and tail indices. */
ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
&tail);
if (pipe->old_tail != tail) {
if (pipe->old_tail < tail)
cnt = tail - pipe->old_tail;
else
cnt = pipe->nr_of_entries - pipe->old_tail + tail;
}
/* Free UL buffers. */
while (cnt--) {
skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
if (!skb)
continue;
/* If the user app was suspended in uplink direction - blocking
* write, resume it.
*/
if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
complete(&channel->ul_sem);
/* Free the skbuf element. */
if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
else
dev_err(ipc_imem->dev,
"OP Type is UL_MUX, unknown if_id %d",
channel->if_id);
} else {
ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
}
}
/* Trace channel stats for IP UL pipe. */
if (ipc_imem_check_wwan_ips(pipe->channel))
ipc_mux_check_n_restart_tx(ipc_imem->mux);
if (ipc_imem->app_notify_ul_pend)
complete(&ipc_imem->ul_pend_sem);
}
/* Execute the ROM irq: read the exit code and wake up the devlink channel. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
channel = ipc_imem->ipc_devlink->devlink_sio.channel;
ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
complete(&channel->ul_sem);
}
/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_TD_UPD_TMR);
return 0;
}
/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
/* The link will go down; test for pending UL packets. */
if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
hrtimer_active(&ipc_imem->tdupdate_timer)) {
/* Generate the doorbell irq. */
ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
/* Stop the TD update timer. */
ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
/* Stop the fast update timer. */
ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
}
}
/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
/* Update & check the current operation phase. */
if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
return -EIO;
if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
IPC_MEM_DEVICE_IPC_UNINIT) {
ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
IPC_MEM_DEVICE_IPC_INIT);
ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
/* reduce period to 100 ms to check for mmio init state */
if (!hrtimer_active(&ipc_imem->startup_timer))
hrtimer_start(&ipc_imem->startup_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
IPC_MEM_DEVICE_IPC_INIT) {
/* Startup complete - disable timer */
ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
/* Prepare the MMIO space */
ipc_mmio_config(ipc_imem->mmio);
ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
IPC_MEM_DEVICE_IPC_RUNNING);
}
return 0;
}
static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
enum hrtimer_restart result = HRTIMER_NORESTART;
struct iosm_imem *ipc_imem =
container_of(hr_timer, struct iosm_imem, startup_timer);
if (ktime_to_ns(ipc_imem->hrtimer_period)) {
hrtimer_forward_now(&ipc_imem->startup_timer,
ipc_imem->hrtimer_period);
result = HRTIMER_RESTART;
}
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
NULL, 0, false);
return result;
}
/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
return (ipc_imem->phase == IPC_P_RUN &&
ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
ipc_mmio_get_exec_stage(ipc_imem->mmio);
}
/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
enum ipc_mem_exec_stage exec_stage =
ipc_imem_get_exec_stage_buffered(ipc_imem);
if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
return 0;
}
/* This function is executed in a task context via an ipc_worker object,
* as the creation or removal of a device can't be done from a tasklet.
*/
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
struct ipc_chnl_cfg chnl_cfg_port = { 0 };
struct ipc_mux_config mux_cfg;
struct iosm_imem *ipc_imem;
u8 ctrl_chl_idx = 0;
int ret;
ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
if (ipc_imem->phase != IPC_P_RUN) {
dev_err(ipc_imem->dev,
"Modem link down. Exit run state worker.");
goto err_out;
}
if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
ipc_devlink_deinit(ipc_imem->ipc_devlink);
ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
if (ret < 0)
goto err_out;
ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
if (!ipc_imem->mux)
goto err_out;
ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
if (ret < 0)
goto err_ipc_mux_deinit;
ipc_imem->mux->wwan = ipc_imem->wwan;
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID &&
chnl_cfg_port.wwan_port_type == WWAN_PORT_XMMRPC) {
ctrl_chl_idx++;
continue;
}
if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
ctrl_chl_idx++;
continue;
}
if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_port,
IRQ_MOD_OFF);
ipc_imem->ipc_port[ctrl_chl_idx] =
ipc_port_init(ipc_imem, chnl_cfg_port);
}
}
ctrl_chl_idx++;
}
ipc_debugfs_init(ipc_imem);
ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
false);
/* Complete all memory stores before setting bit */
smp_mb__before_atomic();
set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
pm_runtime_mark_last_busy(ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_imem->dev);
}
return;
err_ipc_mux_deinit:
ipc_mux_deinit(ipc_imem->mux);
err_out:
ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
}
static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
enum ipc_mem_device_ipc_state curr_ipc_status;
enum ipc_phase old_phase, phase;
bool retry_allocation = false;
bool ul_pending = false;
int i;
if (irq != IMEM_IRQ_DONT_CARE)
ipc_imem->ev_irq_pending[irq] = false;
/* Get the internal phase. */
old_phase = ipc_imem->phase;
if (old_phase == IPC_P_OFF_REQ) {
dev_dbg(ipc_imem->dev,
"[%s]: Ignoring MSI. Deinit sequence in progress!",
ipc_imem_phase_get_string(old_phase));
return;
}
/* Update the phase controlled by CP. */
phase = ipc_imem_phase_update(ipc_imem);
switch (phase) {
case IPC_P_RUN:
if (!ipc_imem->enter_runtime) {
/* Execute the transition from flash/boot to runtime. */
ipc_imem->enter_runtime = 1;
/* allow device to sleep, default value is
* IPC_HOST_SLEEP_ENTER_SLEEP
*/
ipc_imem_msg_send_device_sleep(ipc_imem,
ipc_imem->device_sleep);
ipc_imem_msg_send_feature_set(ipc_imem,
IPC_MEM_INBAND_CRASH_SIG,
true);
}
curr_ipc_status =
ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
/* check ipc_status change */
if (ipc_imem->ipc_status != curr_ipc_status) {
ipc_imem->ipc_status = curr_ipc_status;
if (ipc_imem->ipc_status ==
IPC_MEM_DEVICE_IPC_RUNNING) {
schedule_work(&ipc_imem->run_state_worker);
}
}
/* Consider power management in the runtime phase. */
ipc_imem_slp_control_exec(ipc_imem);
break; /* Continue with skbuf processing. */
/* Unexpected phases. */
case IPC_P_OFF:
case IPC_P_OFF_REQ:
dev_err(ipc_imem->dev, "confused phase %s",
ipc_imem_phase_get_string(phase));
return;
case IPC_P_PSI:
if (old_phase != IPC_P_ROM)
break;
fallthrough;
/* On CP the PSI phase is already active. */
case IPC_P_ROM:
/* Before CP ROM driver starts the PSI image, it sets
* the exit_code field on the doorbell scratchpad and
* triggers the irq.
*/
ipc_imem_rom_irq_exec(ipc_imem);
return;
default:
break;
}
/* process message ring */
ipc_protocol_msg_process(ipc_imem, irq);
/* process all open pipes */
for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
if (dl_pipe->is_open &&
(irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
if (dl_pipe->nr_of_queued_entries == 0)
retry_allocation = true;
}
if (ul_pipe->is_open)
ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
}
/* Try to generate new ADB or ADGH. */
if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
ipc_imem_td_update_timer_start(ipc_imem);
if (ipc_imem->mux->protocol == MUX_AGGREGATION)
ipc_imem_adb_timer_start(ipc_imem);
}
/* Continue the send procedure with accumulated SIO or NETIF packets.
* Reset the debounce flags.
*/
ul_pending |= ipc_imem_ul_write_td(ipc_imem);
/* if UL data is pending restart TD update timer */
if (ul_pending) {
ipc_imem->hrtimer_period =
ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
if (!hrtimer_active(&ipc_imem->tdupdate_timer))
hrtimer_start(&ipc_imem->tdupdate_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
}
/* If CP has executed the transition
* from IPC_INIT to IPC_RUNNING in the PSI
* phase, wake up the flash app to open the pipes.
*/
if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
IPC_MEM_DEVICE_IPC_RUNNING) {
complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
}
/* Reset the expected CP state. */
ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
if (retry_allocation) {
ipc_imem->hrtimer_period =
ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
if (!hrtimer_active(&ipc_imem->td_alloc_timer))
hrtimer_start(&ipc_imem->td_alloc_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
}
}
/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
size_t size)
{
ipc_imem_handle_irq(ipc_imem, arg);
return 0;
}
void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
/* start doorbell irq delay timer if UL is pending */
if (ipc_imem_ul_write_td(ipc_imem))
ipc_imem_td_update_timer_start(ipc_imem);
}
/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
enum ipc_mem_exec_stage stage)
{
switch (stage) {
case IPC_MEM_EXEC_STAGE_BOOT:
if (ipc_imem->phase != IPC_P_ROM) {
/* Send this event only once */
ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
}
ipc_imem->phase = IPC_P_ROM;
break;
case IPC_MEM_EXEC_STAGE_PSI:
ipc_imem->phase = IPC_P_PSI;
break;
case IPC_MEM_EXEC_STAGE_EBL:
ipc_imem->phase = IPC_P_EBL;
break;
case IPC_MEM_EXEC_STAGE_RUN:
if (ipc_imem->phase != IPC_P_RUN &&
ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
}
ipc_imem->phase = IPC_P_RUN;
break;
case IPC_MEM_EXEC_STAGE_CRASH:
if (ipc_imem->phase != IPC_P_CRASH)
ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
ipc_imem->phase = IPC_P_CRASH;
break;
case IPC_MEM_EXEC_STAGE_CD_READY:
if (ipc_imem->phase != IPC_P_CD_READY)
ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
ipc_imem->phase = IPC_P_CD_READY;
break;
default:
/* unknown exec stage:
* assume that link is down and send info to listeners
*/
ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
break;
}
return ipc_imem->phase;
}
/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
struct ipc_pipe *pipe)
{
union ipc_msg_prep_args prep_args = {
.pipe_open.pipe = pipe,
};
if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
pipe->is_open = true;
return pipe->is_open;
}
/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
struct ipc_pipe *dl_pipe = msg;
bool processed = false;
int i;
for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
/* Trigger the doorbell irq to inform CP that new downlink buffers are
* available.
*/
if (processed)
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
return 0;
}
static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
struct iosm_imem *ipc_imem =
container_of(hr_timer, struct iosm_imem, tdupdate_timer);
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
NULL, 0, false);
return HRTIMER_NORESTART;
}
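/* Illustrative sketch, not part of the driver: ipc_imem_td_update_timer_cb()
 * above follows the one-shot hrtimer pattern used throughout this file -
 * keep the callback short, defer the real work to another context and
 * return HRTIMER_NORESTART. A reduced stand-alone version of that pattern;
 * demo_timer, demo_timer_cb, demo_timer_setup and demo_timer_arm are
 * hypothetical names, and the microsecond delay parameter is arbitrary.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
{
	/* Hand the real work to a tasklet/workqueue; do not restart here. */
	return HRTIMER_NORESTART;
}

static void demo_timer_setup(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_cb;
}

static void demo_timer_arm(unsigned long usecs)
{
	if (!hrtimer_active(&demo_timer))
		hrtimer_start(&demo_timer, ktime_set(0, usecs * 1000ULL),
			      HRTIMER_MODE_REL);
}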
/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
enum ipc_mem_exec_stage exec_stage =
ipc_imem_get_exec_stage_buffered(ipc_imem);
/* If the CP stage is undef, return the internal precalculated phase. */
return ipc_imem->phase == IPC_P_OFF_REQ ?
ipc_imem->phase :
ipc_imem_phase_update_check(ipc_imem, exec_stage);
}
const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
switch (phase) {
case IPC_P_RUN:
return "A-RUN";
case IPC_P_OFF:
return "A-OFF";
case IPC_P_ROM:
return "A-ROM";
case IPC_P_PSI:
return "A-PSI";
case IPC_P_EBL:
return "A-EBL";
case IPC_P_CRASH:
return "A-CRASH";
case IPC_P_CD_READY:
return "A-CD_READY";
case IPC_P_OFF_REQ:
return "A-OFF_REQ";
default:
return "A-???";
}
}
void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
pipe->is_open = false;
ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
&prep_args);
ipc_imem_pipe_cleanup(ipc_imem, pipe);
}
void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
struct ipc_mem_channel *channel;
if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
return;
}
channel = &ipc_imem->channels[channel_id];
if (channel->state == IMEM_CHANNEL_FREE) {
dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
channel_id, channel->state);
return;
}
/* Free only the channel id in the CP power off mode. */
if (channel->state == IMEM_CHANNEL_RESERVED)
/* Release only the channel id. */
goto channel_free;
if (ipc_imem->phase == IPC_P_RUN) {
ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
}
ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
channel_free:
ipc_imem_channel_free(channel);
}
struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
int channel_id, u32 db_id)
{
struct ipc_mem_channel *channel;
if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
return NULL;
}
channel = &ipc_imem->channels[channel_id];
channel->state = IMEM_CHANNEL_ACTIVE;
if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
goto ul_pipe_err;
if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
goto dl_pipe_err;
/* Allocate the downlink buffers in tasklet context. */
if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
&channel->dl_pipe, 0, false)) {
dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
goto task_failed;
}
/* Active channel. */
return channel;
task_failed:
ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
ipc_imem_channel_free(channel);
return NULL;
}
void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
ipc_protocol_suspend(ipc_imem->ipc_protocol);
}
void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}
void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
enum ipc_mem_exec_stage stage;
if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
ipc_imem_phase_update_check(ipc_imem, stage);
}
}
void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
/* Reset dynamic channel elements. */
channel->state = IMEM_CHANNEL_FREE;
}
int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
enum ipc_ctype ctype)
{
struct ipc_mem_channel *channel;
int i;
/* Find channel of given type/index */
for (i = 0; i < ipc_imem->nr_of_channels; i++) {
channel = &ipc_imem->channels[i];
if (channel->ctype == ctype && channel->index == index)
break;
}
if (i >= ipc_imem->nr_of_channels) {
dev_dbg(ipc_imem->dev,
"no channel definition for index=%d ctype=%d", index,
ctype);
return -ECHRNG;
}
if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
dev_dbg(ipc_imem->dev, "channel is in use");
return -EBUSY;
}
if (channel->ctype == IPC_CTYPE_WWAN &&
index == IPC_MEM_MUX_IP_CH_IF_ID)
channel->if_id = index;
channel->channel_id = index;
channel->state = IMEM_CHANNEL_RESERVED;
return i;
}
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
struct ipc_mem_channel *channel;
if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
return;
}
if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
dev_err(ipc_imem->dev, "too many channels");
return;
}
channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
channel->channel_id = ipc_imem->nr_of_channels;
channel->ctype = ctype;
channel->index = chnl_cfg.id;
channel->net_err_count = 0;
channel->state = IMEM_CHANNEL_FREE;
ipc_imem->nr_of_channels++;
ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
IRQ_MOD_OFF);
skb_queue_head_init(&channel->ul_list);
init_completion(&channel->ul_sem);
}
void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
struct ipc_mem_channel *channel;
if (id < 0 || id >= ipc_imem->nr_of_channels) {
dev_err(ipc_imem->dev, "invalid channel id %d", id);
return;
}
channel = &ipc_imem->channels[id];
if (channel->state != IMEM_CHANNEL_FREE &&
channel->state != IMEM_CHANNEL_RESERVED) {
dev_err(ipc_imem->dev, "invalid channel state %d",
channel->state);
return;
}
channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
channel->ul_pipe.is_open = false;
channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
channel->ul_pipe.channel = channel;
channel->ul_pipe.dir = IPC_MEM_DIR_UL;
channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
channel->ul_pipe.irq_moderation = irq_moderation;
channel->ul_pipe.buf_size = 0;
channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
channel->dl_pipe.is_open = false;
channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
channel->dl_pipe.channel = channel;
channel->dl_pipe.dir = IPC_MEM_DIR_DL;
channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
channel->dl_pipe.irq_moderation = irq_moderation;
channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}
static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
int i;
for (i = 0; i < ipc_imem->nr_of_channels; i++) {
struct ipc_mem_channel *channel;
channel = &ipc_imem->channels[i];
ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
ipc_imem_channel_free(channel);
}
}
void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
struct sk_buff *skb;
/* Force pipe to closed state also when not explicitly closed through
* ipc_imem_pipe_close()
*/
pipe->is_open = false;
/* Empty the uplink skb accumulator. */
while ((skb = skb_dequeue(&pipe->channel->ul_list)))
ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}
/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
enum ipc_mem_device_ipc_state ipc_state;
	/* If the PCIe link is up, set IPC_UNINIT on the modem;
	 * otherwise skip this step because the PCIe link is down.
	 */
if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
/* set modem to UNINIT
* (in case we want to reload the AP driver without resetting
* the modem)
*/
ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
IPC_MEM_DEVICE_IPC_UNINIT);
ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
/* Wait for maximum 30ms to allow the Modem to uninitialize the
* protocol.
*/
while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
(ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
(timeout > 0)) {
usleep_range(1000, 1250);
timeout--;
ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
}
}
}
void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
ipc_imem->phase = IPC_P_OFF_REQ;
/* forward MDM_NOT_READY to listeners */
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
pm_runtime_get_sync(ipc_imem->dev);
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->tdupdate_timer);
hrtimer_cancel(&ipc_imem->fast_update_timer);
hrtimer_cancel(&ipc_imem->startup_timer);
/* cancel the workqueue */
cancel_work_sync(&ipc_imem->run_state_worker);
if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
ipc_mux_deinit(ipc_imem->mux);
ipc_debugfs_deinit(ipc_imem);
ipc_wwan_deinit(ipc_imem->wwan);
ipc_port_deinit(ipc_imem->ipc_port);
}
if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
ipc_devlink_deinit(ipc_imem->ipc_devlink);
ipc_imem_device_ipc_uninit(ipc_imem);
ipc_imem_channel_reset(ipc_imem);
ipc_protocol_deinit(ipc_imem->ipc_protocol);
ipc_task_deinit(ipc_imem->ipc_task);
kfree(ipc_imem->ipc_task);
kfree(ipc_imem->mmio);
ipc_imem->phase = IPC_P_OFF;
}
/* After CP has unblocked the PCIe link, save the start address of the doorbell
* scratchpad and prepare the shared memory region. If the flashing to RAM
* procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
*/
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
enum ipc_phase phase;
/* Initialize the semaphore for the blocking read UL/DL transfer. */
init_completion(&ipc_imem->ul_pend_sem);
init_completion(&ipc_imem->dl_pend_sem);
/* clear internal flags */
ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
ipc_imem->enter_runtime = 0;
phase = ipc_imem_phase_update(ipc_imem);
/* Either CP shall be in the power off or power on phase. */
switch (phase) {
case IPC_P_ROM:
ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
/* poll execution stage (for delayed start, e.g. NAND) */
if (!hrtimer_active(&ipc_imem->startup_timer))
hrtimer_start(&ipc_imem->startup_timer,
ipc_imem->hrtimer_period,
HRTIMER_MODE_REL);
return 0;
case IPC_P_PSI:
case IPC_P_EBL:
case IPC_P_RUN:
/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
		/* Verify the expected initial state. */
if (ipc_imem->ipc_requested_state ==
ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
ipc_imem_ipc_init_check(ipc_imem);
return 0;
}
dev_err(ipc_imem->dev,
"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
ipc_mmio_get_ipc_state(ipc_imem->mmio));
break;
case IPC_P_CRASH:
case IPC_P_CD_READY:
dev_dbg(ipc_imem->dev,
"Modem is in phase %d, reset Modem to collect CD",
phase);
return 0;
default:
dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
break;
}
complete(&ipc_imem->dl_pend_sem);
complete(&ipc_imem->ul_pend_sem);
ipc_imem->phase = IPC_P_OFF;
return -EIO;
}
/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
void __iomem *mmio, struct device *dev)
{
struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
enum ipc_mem_exec_stage stage;
if (!ipc_imem)
return NULL;
/* Save the device address. */
ipc_imem->pcie = pcie;
ipc_imem->dev = dev;
ipc_imem->pci_device_id = device_id;
ipc_imem->cp_version = 0;
ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
/* Reset the max number of configured channels */
ipc_imem->nr_of_channels = 0;
/* allocate IPC MMIO */
ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
if (!ipc_imem->mmio) {
dev_err(ipc_imem->dev, "failed to initialize mmio region");
goto mmio_init_fail;
}
ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
GFP_KERNEL);
	/* Create tasklet for event handling */
if (!ipc_imem->ipc_task)
goto ipc_task_fail;
if (ipc_task_init(ipc_imem->ipc_task))
goto ipc_task_init_fail;
ipc_imem->ipc_task->dev = ipc_imem->dev;
INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
if (!ipc_imem->ipc_protocol)
goto protocol_init_fail;
/* The phase is set to power off. */
ipc_imem->phase = IPC_P_OFF;
hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
goto imem_config_fail;
}
stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
/* Alloc and Register devlink */
ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
if (!ipc_imem->ipc_devlink) {
dev_err(ipc_imem->dev, "Devlink register failed");
goto imem_config_fail;
}
if (ipc_flash_link_establish(ipc_imem))
goto devlink_channel_fail;
set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
if (!pm_runtime_enabled(ipc_imem->dev))
pm_runtime_enable(ipc_imem->dev);
pm_runtime_set_autosuspend_delay(ipc_imem->dev,
IPC_MEM_AUTO_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(ipc_imem->dev);
pm_runtime_allow(ipc_imem->dev);
pm_runtime_mark_last_busy(ipc_imem->dev);
return ipc_imem;
devlink_channel_fail:
ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->fast_update_timer);
hrtimer_cancel(&ipc_imem->tdupdate_timer);
hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
cancel_work_sync(&ipc_imem->run_state_worker);
ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
kfree(ipc_imem->ipc_task);
ipc_task_fail:
kfree(ipc_imem->mmio);
mmio_init_fail:
kfree(ipc_imem);
return NULL;
}
void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
/* Debounce IPC_EV_IRQ. */
if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
ipc_imem->ev_irq_pending[irq] = true;
ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
NULL, 0, false);
}
}
void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
ipc_imem->td_update_timer_suspended = suspend;
}
/* Verify the CP execution state, copy the chip info,
* change the execution phase to ROM
*/
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
int arg, void *msg,
size_t msgsize)
{
enum ipc_mem_exec_stage stage;
struct sk_buff *skb;
int rc = -EINVAL;
size_t size;
/* Test the CP execution state. */
stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
dev_err(ipc_imem->dev,
"Execution_stage: expected BOOT, received = %X", stage);
goto trigger_chip_info_fail;
}
/* Allocate a new sk buf for the chip info. */
size = ipc_imem->mmio->chip_info_size;
if (size > IOSM_CHIP_INFO_SIZE_MAX)
goto trigger_chip_info_fail;
skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
if (!skb) {
dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
rc = -ENOMEM;
goto trigger_chip_info_fail;
}
/* Copy the chip info characters into the ipc_skb. */
ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
/* First change to the ROM boot phase. */
dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
rc = 0;
trigger_chip_info_fail:
return rc;
}
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
return ipc_task_queue_send_task(ipc_imem,
ipc_imem_devlink_trigger_chip_info_cb,
0, NULL, 0, true);
}
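/* Illustrative sketch, not part of the driver: the expected caller of this
 * file is the PCIe layer, which creates the imem instance at probe time,
 * forwards MSI vectors to it and tears it down on remove. A condensed usage
 * sketch assuming the declarations from iosm_ipc_imem.h; the demo_* function
 * names are hypothetical and error handling is reduced to the minimum.
 */
#include <linux/slab.h>
#include "iosm_ipc_imem.h"

static int demo_probe_imem(struct iosm_pcie *pcie, unsigned int device_id,
			   void __iomem *mmio, struct device *dev)
{
	pcie->imem = ipc_imem_init(pcie, device_id, mmio, dev);

	return pcie->imem ? 0 : -ENOMEM;
}

static void demo_msi_handler(struct iosm_pcie *pcie, int vector)
{
	/* Debounced per vector inside ipc_imem_irq_process(). */
	ipc_imem_irq_process(pcie->imem, vector);
}

static void demo_remove_imem(struct iosm_pcie *pcie)
{
	ipc_imem_cleanup(pcie->imem);
	kfree(pcie->imem);	/* the imem memory is owned by the caller */
}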
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_imem.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include <linux/pm_runtime.h>
#include <linux/wwan.h>
#include "iosm_ipc_trace.h"
/* sub-buffer size and number of sub-buffers */
#define IOSM_TRC_SUB_BUFF_SIZE 131072
#define IOSM_TRC_N_SUB_BUFF 32
#define IOSM_TRC_FILE_PERM 0600
#define IOSM_TRC_DEBUGFS_TRACE "trace"
#define IOSM_TRC_DEBUGFS_TRACE_CTRL "trace_ctrl"
/**
* ipc_trace_port_rx - Receive trace packet from cp and write to relay buffer
* @ipc_imem: Pointer to iosm_imem structure
* @skb: Pointer to struct sk_buff
*/
void ipc_trace_port_rx(struct iosm_imem *ipc_imem, struct sk_buff *skb)
{
struct iosm_trace *ipc_trace = ipc_imem->trace;
if (ipc_trace->ipc_rchan)
relay_write(ipc_trace->ipc_rchan, skb->data, skb->len);
dev_kfree_skb(skb);
}
/* Creates relay file in debugfs. */
static struct dentry *
ipc_trace_create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
*is_global = 1;
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/* Removes relay file from debugfs. */
static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
void *prev_subbuf,
size_t prev_padding)
{
if (relay_buf_full(buf)) {
pr_err_ratelimited("Relay_buf full dropping traces");
return 0;
}
return 1;
}
/* Relay interface callbacks */
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = ipc_trace_subbuf_start_handler,
.create_buf_file = ipc_trace_create_buf_file_handler,
.remove_buf_file = ipc_trace_remove_buf_file_handler,
};
/* Copy the trace control mode to user buffer */
static ssize_t ipc_trace_ctrl_file_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct iosm_trace *ipc_trace = filp->private_data;
char buf[16];
int len;
mutex_lock(&ipc_trace->trc_mutex);
len = snprintf(buf, sizeof(buf), "%d\n", ipc_trace->mode);
mutex_unlock(&ipc_trace->trc_mutex);
return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
/* Open and close the trace channel depending on user input */
static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
const char __user *buffer,
size_t count, loff_t *ppos)
{
struct iosm_trace *ipc_trace = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(buffer, count, 10, &val);
if (ret)
return ret;
pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
mutex_lock(&ipc_trace->trc_mutex);
if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
ipc_trace->chl_id,
IPC_HP_CDEV_OPEN);
if (!ipc_trace->channel) {
ret = -EIO;
goto unlock;
}
ipc_trace->mode = TRACE_ENABLE;
} else if (val == TRACE_DISABLE && ipc_trace->mode != TRACE_DISABLE) {
ipc_trace->mode = TRACE_DISABLE;
/* close trace channel */
ipc_imem_sys_port_close(ipc_trace->ipc_imem,
ipc_trace->channel);
relay_flush(ipc_trace->ipc_rchan);
}
ret = count;
unlock:
mutex_unlock(&ipc_trace->trc_mutex);
pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
return ret;
}
static const struct file_operations ipc_trace_fops = {
.open = simple_open,
.write = ipc_trace_ctrl_file_write,
.read = ipc_trace_ctrl_file_read,
};
/**
* ipc_trace_init - Create trace interface & debugfs entries
* @ipc_imem: Pointer to iosm_imem structure
*
* Returns: Pointer to trace instance on success else NULL
*/
struct iosm_trace *ipc_trace_init(struct iosm_imem *ipc_imem)
{
struct ipc_chnl_cfg chnl_cfg = { 0 };
struct iosm_trace *ipc_trace;
ipc_chnl_cfg_get(&chnl_cfg, IPC_MEM_CTRL_CHL_ID_3);
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, chnl_cfg,
IRQ_MOD_OFF);
ipc_trace = kzalloc(sizeof(*ipc_trace), GFP_KERNEL);
if (!ipc_trace)
return NULL;
ipc_trace->mode = TRACE_DISABLE;
ipc_trace->dev = ipc_imem->dev;
ipc_trace->ipc_imem = ipc_imem;
ipc_trace->chl_id = IPC_MEM_CTRL_CHL_ID_3;
mutex_init(&ipc_trace->trc_mutex);
ipc_trace->ctrl_file = debugfs_create_file(IOSM_TRC_DEBUGFS_TRACE_CTRL,
IOSM_TRC_FILE_PERM,
ipc_imem->debugfs_dir,
ipc_trace, &ipc_trace_fops);
ipc_trace->ipc_rchan = relay_open(IOSM_TRC_DEBUGFS_TRACE,
ipc_imem->debugfs_dir,
IOSM_TRC_SUB_BUFF_SIZE,
IOSM_TRC_N_SUB_BUFF,
&relay_callbacks, NULL);
return ipc_trace;
}
/**
* ipc_trace_deinit - Closing relayfs, removing debugfs entries
* @ipc_trace: Pointer to the iosm_trace data struct
*/
void ipc_trace_deinit(struct iosm_trace *ipc_trace)
{
if (!ipc_trace)
return;
debugfs_remove(ipc_trace->ctrl_file);
relay_close(ipc_trace->ipc_rchan);
mutex_destroy(&ipc_trace->trc_mutex);
kfree(ipc_trace);
}
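/* Illustrative sketch, not part of the driver: ipc_trace_init() and
 * ipc_trace_port_rx() above follow the standard relayfs recipe -
 * relay_open() with create_buf_file/remove_buf_file callbacks,
 * relay_write() from the producer and relay_close() on teardown. A minimal
 * stand-alone version of that recipe; the demo_* names and the 16 KiB x 8
 * sub-buffer geometry are arbitrary choices made only for this sketch.
 */
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/relay.h>

static struct dentry *demo_create_buf_file(const char *filename,
					   struct dentry *parent, umode_t mode,
					   struct rchan_buf *buf,
					   int *is_global)
{
	*is_global = 1;	/* one buffer shared by all CPUs */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int demo_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks demo_relay_cbs = {
	.create_buf_file = demo_create_buf_file,
	.remove_buf_file = demo_remove_buf_file,
};

static struct rchan *demo_rchan;

static int demo_trace_start(struct dentry *debugfs_dir)
{
	demo_rchan = relay_open("demo_trace", debugfs_dir, 16 * 1024, 8,
				&demo_relay_cbs, NULL);
	return demo_rchan ? 0 : -ENOMEM;
}

static void demo_trace_write(const void *data, size_t len)
{
	if (demo_rchan)
		relay_write(demo_rchan, data, len);
}

static void demo_trace_stop(void)
{
	relay_close(demo_rchan);
}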
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_trace.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/nospec.h>
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_mux_codec.h"
#include "iosm_ipc_task_queue.h"
/* Test the link power state and send a MUX command in blocking mode. */
static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
size_t size)
{
struct iosm_mux *ipc_mux = ipc_imem->mux;
const struct mux_acb *acb = msg;
skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
ipc_imem_ul_send(ipc_mux->imem);
return 0;
}
static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
{
struct completion *completion = &ipc_mux->channel->ul_sem;
int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
0, &ipc_mux->acb,
sizeof(ipc_mux->acb), false);
if (ret) {
dev_err(ipc_mux->dev, "unable to send mux command");
return ret;
}
	/* If blocking, suspend the app and wait for the irq in the flash or
	 * crash phase; return -ETIMEDOUT on timeout to indicate failure.
	 */
if (blocking) {
u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
reinit_completion(completion);
if (wait_for_completion_interruptible_timeout
(completion, msecs_to_jiffies(wait_time_milliseconds)) ==
0) {
dev_err(ipc_mux->dev, "ch[%d] timeout",
ipc_mux->channel_id);
ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
return -ETIMEDOUT;
}
}
return 0;
}
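/* Illustrative sketch, not part of the driver: ipc_mux_acb_send() above is
 * the common "queue a request, then block on a completion with a timeout"
 * pattern - reinit_completion() before sleeping, complete() from the
 * response path (ipc_mux_dl_cmdresps_decode_process() in this file) and a
 * zero return from the wait mapped to -ETIMEDOUT. A reduced stand-alone
 * version; demo_cmd_ctx, the demo_* helpers and the 1000 ms timeout are
 * hypothetical.
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_cmd_ctx {
	struct completion done;
};

static void demo_cmd_init(struct demo_cmd_ctx *ctx)
{
	init_completion(&ctx->done);
}

static int demo_cmd_wait(struct demo_cmd_ctx *ctx)
{
	long ret;

	reinit_completion(&ctx->done);	/* arm before sleeping */
	ret = wait_for_completion_interruptible_timeout(&ctx->done,
						msecs_to_jiffies(1000));
	if (!ret)
		return -ETIMEDOUT;	/* 0 means the timeout elapsed */
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	return 0;			/* response arrived in time */
}

static void demo_cmd_response(struct demo_cmd_ctx *ctx)
{
	complete(&ctx->done);		/* wake the blocked requester */
}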
/* Initialize the command header. */
static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
{
struct mux_acb *acb = &ipc_mux->acb;
struct mux_acbh *header;
header = (struct mux_acbh *)(acb->skb)->data;
header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
header->first_cmd_index = header->block_length;
header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
}
/* Add a command to the ACB. */
static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
void *param, u32 param_size)
{
struct mux_acbh *header;
struct mux_cmdh *cmdh;
struct mux_acb *acb;
acb = &ipc_mux->acb;
header = (struct mux_acbh *)(acb->skb)->data;
cmdh = (struct mux_cmdh *)
((acb->skb)->data + le32_to_cpu(header->block_length));
cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
cmdh->command_type = cpu_to_le32(cmd);
cmdh->if_id = acb->if_id;
acb->cmd = cmd;
cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
param_size);
cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
if (param)
memcpy(&cmdh->param, param, param_size);
skb_put(acb->skb, le32_to_cpu(header->block_length) +
le16_to_cpu(cmdh->cmd_len));
return cmdh;
}
/* Prepare mux Command */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
u32 cmd, struct mux_acb *acb,
void *param, u32 param_size)
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
cmdh->command_type = cpu_to_le32(cmd);
cmdh->if_id = acb->if_id;
acb->cmd = cmd;
cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
param_size);
cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
if (param)
memcpy(&cmdh->param, param, param_size);
skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
return cmdh;
}
static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
{
struct mux_acb *acb = &ipc_mux->acb;
struct sk_buff *skb;
dma_addr_t mapping;
/* Allocate skb memory for the uplink buffer. */
skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
if (!skb)
return -ENOMEM;
/* Save the skb address. */
acb->skb = skb;
memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
return 0;
}
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
u32 transaction_id, union mux_cmd_param *param,
size_t res_size, bool blocking, bool respond)
{
struct mux_acb *acb = &ipc_mux->acb;
union mux_type_cmdh cmdh;
int ret = 0;
acb->if_id = if_id;
ret = ipc_mux_acb_alloc(ipc_mux);
if (ret)
return ret;
if (ipc_mux->protocol == MUX_LITE) {
cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
param, res_size);
if (respond)
cmdh.ack_lite->transaction_id =
cpu_to_le32(transaction_id);
} else {
/* Initialize the ACB header. */
ipc_mux_acb_init(ipc_mux);
cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
res_size);
if (respond)
cmdh.ack_aggr->transaction_id =
cpu_to_le32(transaction_id);
}
ret = ipc_mux_acb_send(ipc_mux, blocking);
return ret;
}
void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
{
/* Inform the network interface to start/stop flow ctrl */
ipc_wwan_tx_flowctrl(session->wwan, idx, on);
}
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
union mux_cmd_param param,
__le32 command_type, u8 if_id,
__le32 transaction_id)
{
struct mux_acb *acb = &ipc_mux->acb;
switch (le32_to_cpu(command_type)) {
case MUX_CMD_OPEN_SESSION_RESP:
case MUX_CMD_CLOSE_SESSION_RESP:
/* Resume the control application. */
acb->got_param = param;
break;
case MUX_LITE_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response for the
		 * aggregation version of the protocol, so return non-zero.
*/
if (ipc_mux->protocol != MUX_LITE)
return -EINVAL;
dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
if_id, le32_to_cpu(transaction_id));
break;
case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
		/* This command type is not expected as a response for the
		 * Lite version of the protocol, so return non-zero.
*/
if (ipc_mux->protocol == MUX_LITE)
return -EINVAL;
break;
default:
return -EINVAL;
}
acb->wanted_response = MUX_CMD_INVALID;
acb->got_response = le32_to_cpu(command_type);
complete(&ipc_mux->channel->ul_sem);
return 0;
}
static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
union mux_cmd_param *param,
__le32 command_type, u8 if_id,
__le16 cmd_len, int size)
{
struct mux_session *session;
struct hrtimer *adb_timer;
dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
if_id, le32_to_cpu(command_type));
switch (le32_to_cpu(command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
if_id);
return -EINVAL; /* No session interface id. */
}
session = &ipc_mux->session[if_id];
adb_timer = &ipc_mux->imem->adb_timer;
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
/* Backward Compatibility */
if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
session->flow_ctl_mask = ~0;
/* if CP asks for FLOW CTRL Enable
* then set our internal flow control Tx flag
* to limit uplink session queueing
*/
session->net_tx_stop = true;
			/* We have to finish the ADB here; otherwise any
			 * already queued data would only be sent to CP once
			 * the ADB becomes full because of some other session.
			 */
if (ipc_mux->protocol == MUX_AGGREGATION) {
ipc_mux_ul_adb_finish(ipc_mux);
ipc_imem_hrtimer_stop(adb_timer);
}
/* Update the stats */
session->flow_ctl_en_cnt++;
} else if (param->flow_ctl.mask == 0) {
			/* Just reset the flow control mask and let
			 * mux_flow_ctrl_low_thre_b take control of our
			 * internal Tx flag, enabling kernel flow control.
			 */
dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
if_id, le32_to_cpu(param->flow_ctl.mask));
/* Backward Compatibility */
if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
session->flow_ctl_mask = 0;
/* Update the stats */
session->flow_ctl_dis_cnt++;
} else {
break;
}
ipc_mux->acc_adb_size = 0;
ipc_mux->acc_payload_size = 0;
dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
le32_to_cpu(param->flow_ctl.mask));
break;
case MUX_LITE_CMD_LINK_STATUS_REPORT:
break;
default:
return -EINVAL;
}
return 0;
}
/* Decode and Send appropriate response to a command block. */
static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id;
int size;
if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
cmdh->command_type, cmdh->if_id,
cmdh->transaction_id)) {
		/* Failure to decode a command response indicates that the
		 * cmd_type may be a command rather than a response, so try
		 * decoding it as a command.
		 */
size = offsetof(struct mux_lite_cmdh, param) +
sizeof(cmdh->param.flow_ctl);
if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
cmdh->command_type,
cmdh->if_id,
cmdh->cmd_len, size)) {
/* Decoded command may need a response. Give the
* response according to the command type.
*/
union mux_cmd_param *mux_cmd = NULL;
size_t size = 0;
u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
if (cmdh->command_type ==
cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
mux_cmd = &cmdh->param;
mux_cmd->link_status_resp.response =
cpu_to_le32(MUX_CMD_RESP_SUCCESS);
/* response field is u32 */
size = sizeof(u32);
} else if (cmdh->command_type ==
cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
} else {
return;
}
if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
le32_to_cpu(trans_id),
mux_cmd, size, false,
true))
dev_err(ipc_mux->dev,
"if_id %d: cmd send failed",
cmdh->if_id);
}
}
}
/* Pass the DL packet to the netif layer. */
static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
struct iosm_wwan *wwan, u32 offset,
u8 service_class, struct sk_buff *skb,
u32 pkt_len)
{
struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
if (!dest_skb)
return -ENOMEM;
skb_pull(dest_skb, offset);
skb_trim(dest_skb, pkt_len);
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
/* Decode Flow Credit Table in the block */
static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
unsigned char *block)
{
struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
struct iosm_wwan *wwan;
int ul_credits;
int if_id;
if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
dev_err(ipc_mux->dev, "unexpected FCT length: %d",
fct->vfl_length);
return;
}
if_id = fct->if_id;
if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
return;
}
/* Is the session active ? */
if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan;
if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL");
return;
}
ul_credits = le32_to_cpu(fct->vfl.nr_of_bytes);
dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
/* Update the Flow Credit information from ADB */
ipc_mux->session[if_id].ul_flow_credits += ul_credits;
/* Check whether the TX can be started */
if (ipc_mux->session[if_id].ul_flow_credits > 0) {
ipc_mux->session[if_id].net_tx_stop = false;
ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
ipc_mux->session[if_id].if_id, false);
}
}
/* Decode non-aggregated datagram */
static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
struct sk_buff *skb)
{
u32 pad_len, packet_offset, adgh_len;
struct iosm_wwan *wwan;
struct mux_adgh *adgh;
u8 *block = skb->data;
int rc = 0;
u8 if_id;
adgh = (struct mux_adgh *)block;
if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "invalid ADGH signature received");
return;
}
if_id = adgh->if_id;
if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
return;
}
/* Is the session active ? */
if_id = array_index_nospec(if_id, IPC_MEM_MUX_IP_SESSION_ENTRIES);
wwan = ipc_mux->session[if_id].wwan;
if (!wwan) {
dev_err(ipc_mux->dev, "session Net ID is NULL");
return;
}
	/* Store the pad len for the corresponding session.
	 * Pad bytes are those negotiated in the open session, less the header
	 * size (see the session management chapter for details). If the
	 * resulting padding is zero or less, the additional head padding is
	 * omitted. For example, if HEAD_PAD_LEN = 16 or less, this field is
	 * omitted; if HEAD_PAD_LEN = 20, this field contains 4 bytes set to
	 * zero.
	 */
pad_len =
ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
packet_offset = sizeof(*adgh) + pad_len;
if_id += ipc_mux->wwan_q_offset;
adgh_len = le16_to_cpu(adgh->length);
/* Pass the packet to the netif layer */
rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
adgh->service_class, skb,
adgh_len - packet_offset);
if (rc) {
dev_err(ipc_mux->dev, "mux adgh decoding error");
return;
}
ipc_mux->session[if_id].flush = 1;
}
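/* Illustrative arithmetic only, not part of the driver:
 * ipc_mux_dl_adgh_decode() above locates the datagram inside the block as
 *
 *   pad_len       = dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET
 *   packet_offset = sizeof(struct mux_adgh) + pad_len
 *   payload_len   = ADGH length field - packet_offset
 *
 * The helper below mirrors only that relationship; the parameter names and
 * the example numbers in the comments (a 16-byte ADGH header, an Ethernet
 * offset of 16, a negotiated head pad of 20) are hypothetical.
 */
static inline unsigned int demo_adgh_payload_len(unsigned int adgh_hdr_size,
						 unsigned int eth_offset,
						 unsigned int head_pad,
						 unsigned int adgh_len)
{
	unsigned int pad_len = head_pad - eth_offset;	/* e.g. 20 - 16 = 4 */
	unsigned int offset = adgh_hdr_size + pad_len;	/* e.g. 16 + 4 = 20 */

	return adgh_len - offset;	/* bytes passed to the netif layer */
}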
static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
struct mux_cmdh *cmdh, int size)
{
u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
union mux_cmd_param *cmd_p = NULL;
u32 cmd = link_st;
u32 trans_id;
if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
cmdh->command_type, cmdh->if_id,
cmdh->cmd_len, size)) {
size = 0;
if (cmdh->command_type == cpu_to_le32(link_st)) {
cmd_p = &cmdh->param;
cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
} else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
(cmdh->command_type == cpu_to_le32(fctl_dis))) {
cmd = fctl_ack;
} else {
return;
}
trans_id = le32_to_cpu(cmdh->transaction_id);
ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
trans_id, cmd_p, size, false, true);
}
}
/* Decode an aggregated command block. */
static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
struct mux_acbh *acbh;
struct mux_cmdh *cmdh;
u32 next_cmd_index;
u8 *block;
int size;
acbh = (struct mux_acbh *)(skb->data);
block = (u8 *)(skb->data);
next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
next_cmd_index = array_index_nospec(next_cmd_index,
sizeof(struct mux_cmdh));
while (next_cmd_index != 0) {
cmdh = (struct mux_cmdh *)&block[next_cmd_index];
next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
cmdh->command_type,
cmdh->if_id,
cmdh->transaction_id)) {
size = offsetof(struct mux_cmdh, param) +
sizeof(cmdh->param.flow_ctl);
ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
}
}
}
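/* Illustrative sketch, not part of the driver: ipc_mux_dl_acb_decode()
 * above walks command headers chained by byte offsets inside a single
 * block - start at first_cmd_index, follow next_cmd_index, stop at offset
 * zero. A generic stand-alone version of that walk; struct demo_hdr and
 * demo_walk_chain are hypothetical, and a real implementation must also
 * bound-check every offset against the block length (the driver clamps
 * indices with array_index_nospec()).
 */
#include <asm/byteorder.h>
#include <linux/types.h>

struct demo_hdr {
	__le32 next_index;	/* byte offset of the next header, 0 = end */
	__le32 payload;
};

static void demo_walk_chain(u8 *block, u32 first_index,
			    void (*visit)(struct demo_hdr *))
{
	u32 index = first_index;

	while (index) {
		struct demo_hdr *hdr = (struct demo_hdr *)&block[index];

		index = le32_to_cpu(hdr->next_index);	/* fetch link first */
		visit(hdr);
	}
}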
/* process datagram */
static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
struct mux_adth_dg *dg, struct sk_buff *skb,
int if_id, int nr_of_dg)
{
u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
u32 packet_offset, i, rc, dg_len;
for (i = 0; i < nr_of_dg; i++, dg++) {
if (le32_to_cpu(dg->datagram_index)
< sizeof(struct mux_adbh))
goto dg_error;
/* Is the packet inside of the ADB */
if (le32_to_cpu(dg->datagram_index) >=
le32_to_cpu(adbh->block_length)) {
goto dg_error;
} else {
packet_offset =
le32_to_cpu(dg->datagram_index) +
dl_head_pad_len;
dg_len = le16_to_cpu(dg->datagram_length);
/* Pass the packet to the netif layer. */
rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
packet_offset,
dg->service_class, skb,
dg_len - dl_head_pad_len);
if (rc)
goto dg_error;
}
}
return 0;
dg_error:
return -1;
}
/* Decode an aggregated data block. */
static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
struct sk_buff *skb)
{
struct mux_adth_dg *dg;
struct iosm_wwan *wwan;
struct mux_adbh *adbh;
struct mux_adth *adth;
int nr_of_dg, if_id;
u32 adth_index;
u8 *block;
block = skb->data;
adbh = (struct mux_adbh *)block;
/* Process the aggregated datagram tables. */
adth_index = le32_to_cpu(adbh->first_table_index);
/* Has CP sent an empty ADB ? */
if (adth_index < 1) {
dev_err(ipc_mux->dev, "unexpected empty ADB");
goto adb_decode_err;
}
/* Loop through mixed session tables. */
while (adth_index) {
/* Get the reference to the table header. */
adth = (struct mux_adth *)(block + adth_index);
/* Get the interface id and map it to the netif id. */
if_id = adth->if_id;
if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
goto adb_decode_err;
if_id = array_index_nospec(if_id,
IPC_MEM_MUX_IP_SESSION_ENTRIES);
/* Is the session active ? */
wwan = ipc_mux->session[if_id].wwan;
if (!wwan)
goto adb_decode_err;
/* Consistency checks for aggregated datagram table. */
if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
goto adb_decode_err;
if (le16_to_cpu(adth->table_length) < sizeof(struct mux_adth))
goto adb_decode_err;
/* Calculate the number of datagrams. */
nr_of_dg = (le16_to_cpu(adth->table_length) -
sizeof(struct mux_adth)) /
sizeof(struct mux_adth_dg);
/* Is the datagram table empty ? */
if (nr_of_dg < 1) {
dev_err(ipc_mux->dev,
"adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
adth_index, nr_of_dg,
le32_to_cpu(adth->next_table_index));
/* Move to the next aggregated datagram table. */
adth_index = le32_to_cpu(adth->next_table_index);
continue;
}
/* New aggregated datagram table. */
dg = adth->dg;
if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
nr_of_dg) < 0)
goto adb_decode_err;
/* mark session for final flush */
ipc_mux->session[if_id].flush = 1;
/* Move to the next aggregated datagram table. */
adth_index = le32_to_cpu(adth->next_table_index);
}
adb_decode_err:
return;
}
/**
* ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
* depending on Header.
* @ipc_mux: Pointer to MUX data-struct
* @skb: Pointer to ipc_skb.
*/
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
u32 signature;
if (!skb->data)
return;
/* Decode the MUX header type. */
signature = le32_to_cpup((__le32 *)skb->data);
switch (signature) {
case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
mux_dl_adb_decode(ipc_mux, skb);
break;
case IOSM_AGGR_MUX_SIG_ADGH:
ipc_mux_dl_adgh_decode(ipc_mux, skb);
break;
case MUX_SIG_FCTH:
ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
break;
case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
ipc_mux_dl_acb_decode(ipc_mux, skb);
break;
case MUX_SIG_CMDH:
ipc_mux_dl_cmd_decode(ipc_mux, skb);
break;
default:
dev_err(ipc_mux->dev, "invalid ABH signature");
}
ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
}
static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
struct mux_adb *ul_adb, u32 type)
{
/* Take the first element of the free list. */
struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
u32 *next_tb_id;
int qlt_size;
u32 if_id;
if (!skb)
return -EBUSY; /* Wait for a free ADB skb. */
/* Mark it as UL ADB to select the right free operation. */
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) {
case IOSM_AGGR_MUX_SIG_ADBH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
/* reset statistic counter */
ul_adb->if_cnt = 0;
ul_adb->payload_size = 0;
ul_adb->dg_cnt_total = 0;
/* Initialize the ADBH. */
ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
ul_adb->adbh->block_length =
cpu_to_le32(sizeof(struct mux_adbh));
next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
ul_adb->next_table_index = next_tb_id;
/* Clear the local copy of DGs for new ADB */
memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
/* Clear the DG count and QLT updated status for new ADB */
for (if_id = 0; if_id < no_if; if_id++) {
ul_adb->dg_count[if_id] = 0;
ul_adb->qlt_updated[if_id] = 0;
}
break;
case IOSM_AGGR_MUX_SIG_ADGH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
/* reset statistic counter */
ul_adb->if_cnt = 0;
ul_adb->payload_size = 0;
ul_adb->dg_cnt_total = 0;
ul_adb->adgh = (struct mux_adgh *)skb->data;
memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
break;
case MUX_SIG_QLTH:
qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
(MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
dev_err(ipc_mux->dev,
"can't support. QLT size:%d SKB size: %d",
qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
return -ERANGE;
}
ul_adb->qlth_skb = skb;
memset((ul_adb->qlth_skb)->data, 0, qlt_size);
skb_put(skb, qlt_size);
break;
}
return 0;
}
static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
{
struct mux_adb *ul_adb = &ipc_mux->ul_adb;
u16 adgh_len;
long long bytes;
char *str;
if (!ul_adb->dest_skb) {
dev_err(ipc_mux->dev, "no dest skb");
return;
}
adgh_len = le16_to_cpu(ul_adb->adgh->length);
skb_put(ul_adb->dest_skb, adgh_len);
skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
ul_adb->dest_skb = NULL;
if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
struct mux_session *session;
session = &ipc_mux->session[ul_adb->adgh->if_id];
str = "available_credits";
bytes = (long long)session->ul_flow_credits;
} else {
str = "pend_bytes";
bytes = ipc_mux->ul_data_pend_bytes;
ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
adgh_len;
}
dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
str, bytes);
}
static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
struct mux_adb *ul_adb, int *out_offset)
{
int i, qlt_size, offset = *out_offset;
struct mux_qlth *p_adb_qlt;
struct mux_adth_dg *dg;
struct mux_adth *adth;
u16 adth_dg_size;
u32 *next_tb_id;
qlt_size = offsetof(struct mux_qlth, ql) +
MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
for (i = 0; i < ipc_mux->nr_sessions; i++) {
if (ul_adb->dg_count[i] > 0) {
adth_dg_size = offsetof(struct mux_adth, dg) +
ul_adb->dg_count[i] * sizeof(*dg);
*ul_adb->next_table_index = offset;
adth = (struct mux_adth *)&ul_adb->buf[offset];
next_tb_id = (unsigned int *)&adth->next_table_index;
ul_adb->next_table_index = next_tb_id;
offset += adth_dg_size;
adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
adth->if_id = i;
adth->table_length = cpu_to_le16(adth_dg_size);
adth_dg_size -= offsetof(struct mux_adth, dg);
memcpy(adth->dg, ul_adb->dg[i], adth_dg_size);
ul_adb->if_cnt++;
}
if (ul_adb->qlt_updated[i]) {
*ul_adb->next_table_index = offset;
p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
ul_adb->next_table_index =
(u32 *)&p_adb_qlt->next_table_index;
memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
offset += qlt_size;
}
}
*out_offset = offset;
}
/**
* ipc_mux_ul_adb_finish - Add the TD of the aggregated session packets to TDR.
* @ipc_mux: Pointer to MUX data-struct.
*/
void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
{
bool ul_data_pend = false;
struct mux_adb *ul_adb;
unsigned long flags;
int offset;
ul_adb = &ipc_mux->ul_adb;
if (!ul_adb->dest_skb)
return;
offset = *ul_adb->next_table_index;
ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
ul_adb->adbh->block_length = cpu_to_le32(offset);
if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
ul_adb->dest_skb = NULL;
return;
}
*ul_adb->next_table_index = 0;
ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
	spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
	spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);
ul_adb->dest_skb = NULL;
/* Updates the TDs with ul_list */
ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
/* Delay the doorbell irq */
if (ul_data_pend)
ipc_imem_td_update_timer_start(ipc_mux->imem);
ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
ipc_mux->acc_payload_size += ul_adb->payload_size;
ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
}
/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
struct mux_adb *adb, int *size_needed,
u32 type)
{
bool ret_val = false;
int status;
if (!adb->dest_skb) {
		/* Allocate memory for the ADB, including the
		 * datagram table header.
*/
status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
if (status)
/* Is a pending ADB available ? */
ret_val = true; /* None. */
		/* Reset the needed size to zero only for new ADB memory. */
*size_needed = 0;
}
return ret_val;
}
/* Informs the network stack to stop sending further packets for all opened
* sessions
*/
static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
struct mux_session *session;
int idx;
for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
continue;
session->net_tx_stop = true;
}
}
/* Sends Queue Level Table of all opened sessions */
static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
{
struct ipc_mem_lite_gen_tbl *qlt;
struct mux_session *session;
bool qlt_updated = false;
int i;
int qlt_size;
if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
return qlt_updated;
qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
session = &ipc_mux->session[i];
if (!session->wwan || session->flow_ctl_mask)
continue;
if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
MUX_SIG_QLTH)) {
dev_err(ipc_mux->dev,
"no reserved mem to send QLT of if_id: %d", i);
break;
}
/* Prepare QLT */
qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
->data;
qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
qlt->length = cpu_to_le16(qlt_size);
qlt->if_id = i;
qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
qlt->reserved[0] = 0;
qlt->reserved[1] = 0;
qlt->vfl.nr_of_bytes = cpu_to_le32(session->ul_list.qlen);
/* Add QLT to the transfer list. */
skb_queue_tail(&ipc_mux->channel->ul_list,
ipc_mux->ul_adb.qlth_skb);
qlt_updated = true;
ipc_mux->ul_adb.qlth_skb = NULL;
}
if (qlt_updated)
/* Updates the TDs with ul_list */
(void)ipc_imem_ul_write_td(ipc_mux->imem);
return qlt_updated;
}
/* Checks the available credits for the specified session and returns
* number of packets for which credits are available.
*/
static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
struct mux_session *session,
struct sk_buff_head *ul_list,
int max_nr_of_pkts)
{
int pkts_to_send = 0;
struct sk_buff *skb;
int credits = 0;
if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
credits = session->ul_flow_credits;
if (credits <= 0) {
dev_dbg(ipc_mux->dev,
"FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
session->if_id, session->ul_flow_credits,
session->ul_list.qlen); /* nr_of_bytes */
return 0;
}
} else {
credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
ipc_mux->ul_data_pend_bytes;
if (credits <= 0) {
ipc_mux_stop_tx_for_all_sessions(ipc_mux);
dev_dbg(ipc_mux->dev,
"if_id[%d] encod. fail Bytes: %llu, thresh: %d",
session->if_id, ipc_mux->ul_data_pend_bytes,
IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
return 0;
}
}
/* Check if there are enough credits/bytes available to send the
* requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
* depending on available credits.
*/
skb_queue_walk(ul_list, skb)
{
if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
break;
credits -= skb->len;
pkts_to_send++;
}
return pkts_to_send;
}
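/* Illustrative sketch, not part of the driver:
 * ipc_mux_ul_bytes_credits_check() above answers "how many of the queued
 * packets fit into the remaining byte budget?" by walking the UL list and
 * subtracting each skb length from the budget. The same budgeting loop over
 * a plain array of packet lengths; the demo_* name and parameters are
 * hypothetical.
 */
static int demo_pkts_within_budget(const int *pkt_len, int nr_pkts,
				   long budget, int max_pkts)
{
	int n = 0;

	while (n < nr_pkts && n < max_pkts && budget >= pkt_len[n]) {
		budget -= pkt_len[n];	/* consume credits for this packet */
		n++;
	}

	return n;			/* packets that can be encoded now */
}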
/* Encode the UL IP packet according to Lite spec. */
static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
struct mux_session *session,
struct sk_buff_head *ul_list,
struct mux_adb *adb, int nr_of_pkts)
{
int offset = sizeof(struct mux_adgh);
int adb_updated = -EINVAL;
struct sk_buff *src_skb;
int aligned_size = 0;
int nr_of_skb = 0;
u32 pad_len = 0;
/* Re-calculate the number of packets depending on number of bytes to be
* processed/available credits.
*/
nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
nr_of_pkts);
	/* If the nr_of_pkts calculated from the available credits is <= 0,
	 * there is nothing to do.
*/
if (nr_of_pkts <= 0)
return 0;
/* Read configured UL head_pad_length for session.*/
if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
/* Process all pending UL packets for this session
* depending on the allocated datagram table size.
*/
while (nr_of_pkts > 0) {
/* get destination skb allocated */
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH");
return -ENOMEM;
}
/* Peek at the head of the list. */
src_skb = skb_peek(ul_list);
if (!src_skb) {
dev_err(ipc_mux->dev,
"skb peek return NULL with count : %d",
nr_of_pkts);
break;
}
/* Calculate the memory value. */
aligned_size = ALIGN((pad_len + src_skb->len), 4);
ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
if (ipc_mux->size_needed > adb->size) {
dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
ipc_mux->size_needed, adb->size);
/* Return 1 if any IP packet is added to the transfer
* list.
*/
return nr_of_skb ? 1 : 0;
}
		/* Add the buffer (without head padding) to the next pending transfer. */
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
adb->adgh->if_id = session_id;
adb->adgh->length =
cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
src_skb->len);
adb->adgh->service_class = src_skb->priority;
adb->adgh->next_count = --nr_of_pkts;
adb->dg_cnt_total++;
adb->payload_size += src_skb->len;
if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
/* Decrement the credit value as we are processing the
* datagram from the UL list.
*/
session->ul_flow_credits -= src_skb->len;
/* Remove the processed elements and free it. */
src_skb = skb_dequeue(ul_list);
dev_kfree_skb(src_skb);
nr_of_skb++;
ipc_mux_ul_adgh_finish(ipc_mux);
}
if (nr_of_skb) {
/* Send QLT info to modem if pending bytes > high watermark
* in case of mux lite
*/
if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
ipc_mux->ul_data_pend_bytes >=
IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
else
adb_updated = 1;
/* Updates the TDs with ul_list */
(void)ipc_imem_ul_write_td(ipc_mux->imem);
}
return adb_updated;
}
/**
* ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
* @ipc_mux: pointer to MUX instance data
 * @p_adb: pointer to UL aggregated data block
* @session_id: session id
* @qlth_n_ql_size: Length (in bytes) of the datagram table
* @ul_list: pointer to skb buffer head
*/
void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
int session_id, int qlth_n_ql_size,
struct sk_buff_head *ul_list)
{
int qlevel = ul_list->qlen;
struct mux_qlth *p_qlt;
p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
/* Initialize QLTH if not been done */
if (p_adb->qlt_updated[session_id] == 0) {
p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
p_qlt->if_id = session_id;
p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
p_qlt->reserved = 0;
p_qlt->reserved2 = 0;
}
/* Update Queue Level information always */
p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
p_adb->qlt_updated[session_id] = 1;
}
/* Update the next table index. */
static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
int session_id,
struct sk_buff_head *ul_list,
struct mux_adth_dg *dg,
int aligned_size,
u32 qlth_n_ql_size,
struct mux_adb *adb,
struct sk_buff *src_skb)
{
ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
qlth_n_ql_size, ul_list);
ipc_mux_ul_adb_finish(ipc_mux);
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
IOSM_AGGR_MUX_SIG_ADBH))
return -ENOMEM;
ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
ipc_mux->size_needed += offsetof(struct mux_adth, dg);
ipc_mux->size_needed += qlth_n_ql_size;
ipc_mux->size_needed += sizeof(*dg) + aligned_size;
return 0;
}
/* Process encode session UL data. */
static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
struct mux_adth_dg *dg,
struct sk_buff_head *ul_list,
struct sk_buff *src_skb, int session_id,
int pkt_to_send, u32 qlth_n_ql_size,
int *out_offset, int head_pad_len)
{
int aligned_size;
int offset = *out_offset;
unsigned long flags;
int nr_of_skb = 0;
while (pkt_to_send > 0) {
/* Peek at the head of the list. */
src_skb = skb_peek(ul_list);
if (!src_skb) {
dev_err(ipc_mux->dev,
"skb peek return NULL with count : %d",
pkt_to_send);
return -1;
}
aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
ipc_mux->size_needed += sizeof(*dg) + aligned_size;
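/* If the accumulated size no longer fits into the current ADB, or would
 * push the pending UL bytes past the flow control high watermark, record
 * the next table index and continue in a freshly allocated ADB.
 */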
if (ipc_mux->size_needed > adb->size ||
((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
*adb->next_table_index = offset;
if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
ul_list, dg,
aligned_size,
qlth_n_ql_size, adb,
src_skb) < 0)
return -ENOMEM;
nr_of_skb = 0;
offset = le32_to_cpu(adb->adbh->block_length);
/* Load pointer to next available datagram entry */
dg = adb->dg[session_id] + adb->dg_count[session_id];
}
/* Add buffer without head padding to next pending transfer. */
memcpy(adb->buf + offset + head_pad_len,
src_skb->data, src_skb->len);
/* Setup datagram entry. */
dg->datagram_index = cpu_to_le32(offset);
dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
dg->service_class = src_skb->priority;
dg->reserved = 0;
adb->dg_cnt_total++;
adb->payload_size += le16_to_cpu(dg->datagram_length);
dg++;
adb->dg_count[session_id]++;
offset += aligned_size;
/* Remove the processed elements and free it. */
spin_lock_irqsave(&ul_list->lock, flags);
src_skb = __skb_dequeue(ul_list);
spin_unlock_irqrestore(&ul_list->lock, flags);
dev_kfree_skb(src_skb);
nr_of_skb++;
pkt_to_send--;
}
*out_offset = offset;
return nr_of_skb;
}
/* Process and encode session UL data into the ADB. */
static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
struct mux_session *session,
struct sk_buff_head *ul_list, struct mux_adb *adb,
int pkt_to_send)
{
int adb_updated = -EINVAL;
int head_pad_len, offset;
struct sk_buff *src_skb = NULL;
struct mux_adth_dg *dg;
u32 qlth_n_ql_size;
/* If any of the opened sessions has set Flow Control ON then limit the
* UL data to mux_flow_ctrl_high_thresh_b bytes
*/
if (ipc_mux->ul_data_pend_bytes >=
IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
ipc_mux_stop_tx_for_all_sessions(ipc_mux);
return adb_updated;
}
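/* Size of one queue level table: the QLTH header plus MUX_QUEUE_LEVEL
 * queue level entries.
 */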
qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
head_pad_len = session->ul_head_pad_len;
if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
IOSM_AGGR_MUX_SIG_ADBH))
return -ENOMEM;
offset = le32_to_cpu(adb->adbh->block_length);
if (ipc_mux->size_needed == 0)
ipc_mux->size_needed = offset;
/* Calculate the size needed for ADTH, QLTH and QL*/
if (adb->dg_count[session_id] == 0) {
ipc_mux->size_needed += offsetof(struct mux_adth, dg);
ipc_mux->size_needed += qlth_n_ql_size;
}
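/* Point at the next free datagram entry of this session. */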
dg = adb->dg[session_id] + adb->dg_count[session_id];
if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
session_id, pkt_to_send, qlth_n_ql_size, &offset,
head_pad_len) > 0) {
adb_updated = 1;
*adb->next_table_index = offset;
ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
qlth_n_ql_size, ul_list);
adb->adbh->block_length = cpu_to_le32(offset);
}
return adb_updated;
}
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
struct sk_buff_head *ul_list;
struct mux_session *session;
int updated = 0;
int session_id;
int dg_n;
int i;
if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
ipc_mux->adb_prep_ongoing)
return false;
ipc_mux->adb_prep_ongoing = true;
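/* Visit the IP sessions in round-robin order, starting at rr_next_session. */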
for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
session_id = ipc_mux->rr_next_session;
session = &ipc_mux->session[session_id];
/* Go to the next session; handle rr_next_session overflow */
ipc_mux->rr_next_session++;
if (ipc_mux->rr_next_session >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
ipc_mux->rr_next_session = 0;
if (!session->wwan || session->flow_ctl_mask ||
session->net_tx_stop)
continue;
ul_list = &session->ul_list;
/* Is something pending in UL and flow ctrl off */
dg_n = skb_queue_len(ul_list);
if (dg_n > MUX_MAX_UL_DG_ENTRIES)
dg_n = MUX_MAX_UL_DG_ENTRIES;
if (dg_n == 0)
/* Nothing to do for ipc_mux session
* -> try next session id.
*/
continue;
if (ipc_mux->protocol == MUX_LITE)
updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
session, ul_list,
&ipc_mux->ul_adb,
dg_n);
else
updated = mux_ul_adb_encode(ipc_mux, session_id,
session, ul_list,
&ipc_mux->ul_adb,
dg_n);
}
ipc_mux->adb_prep_ongoing = false;
return updated == 1;
}
/* Calculates the Payload from any given ADB. */
static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
struct mux_adbh *p_adbh)
{
struct mux_adth_dg *dg;
struct mux_adth *adth;
u32 payload_size = 0;
u32 next_table_idx;
int nr_of_dg, i;
/* Process the aggregated datagram tables. */
next_table_idx = le32_to_cpu(p_adbh->first_table_index);
if (next_table_idx < sizeof(struct mux_adbh)) {
dev_err(ipc_mux->dev, "unexpected empty ADB");
return payload_size;
}
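/* Walk the chain of aggregated datagram tables via next_table_index until
 * it terminates with 0.
 */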
while (next_table_idx != 0) {
/* Get the reference to the table header. */
adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
nr_of_dg = (le16_to_cpu(adth->table_length) -
sizeof(struct mux_adth)) /
sizeof(struct mux_adth_dg);
if (nr_of_dg <= 0)
return payload_size;
dg = adth->dg;
for (i = 0; i < nr_of_dg; i++, dg++) {
if (le32_to_cpu(dg->datagram_index) <
sizeof(struct mux_adbh)) {
return payload_size;
}
payload_size +=
le16_to_cpu(dg->datagram_length);
}
}
next_table_idx = le32_to_cpu(adth->next_table_index);
}
return payload_size;
}
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
union mux_type_header hr;
u16 adgh_len;
int payload;
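/* Account for the completed UL transfer: MUX Lite uses the ADGH length
 * field, aggregation sums the datagram lengths found in the ADB.
 */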
if (ipc_mux->protocol == MUX_LITE) {
hr.adgh = (struct mux_adgh *)skb->data;
adgh_len = le16_to_cpu(hr.adgh->length);
if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
ipc_mux->ul_flow == MUX_UL)
ipc_mux->ul_data_pend_bytes =
ipc_mux->ul_data_pend_bytes - adgh_len;
} else {
hr.adbh = (struct mux_adbh *)(skb->data);
payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
ipc_mux->ul_data_pend_bytes -= payload;
}
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
ipc_mux->ul_data_pend_bytes);
/* Reset the skb settings. */
skb_trim(skb, 0);
/* Add the consumed ADB to the free list. */
skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
}
/* Start the NETIF uplink send transfer in MUX mode. */
static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
struct iosm_mux *ipc_mux = ipc_imem->mux;
bool ul_data_pend = false;
/* Add session UL data to a ADB and ADGH */
ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
if (ul_data_pend) {
if (ipc_mux->protocol == MUX_AGGREGATION)
ipc_imem_adb_timer_start(ipc_mux->imem);
/* Delay the doorbell irq */
ipc_imem_td_update_timer_start(ipc_mux->imem);
}
/* reset the debounce flag */
ipc_mux->ev_mux_net_transmit_pending = false;
return 0;
}
int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
struct sk_buff *skb)
{
struct mux_session *session = &ipc_mux->session[if_id];
int ret = -EINVAL;
if (ipc_mux->channel &&
ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
dev_err(ipc_mux->dev,
"channel state is not IMEM_CHANNEL_ACTIVE");
goto out;
}
if (!session->wwan) {
dev_err(ipc_mux->dev, "session net ID is NULL");
ret = -EFAULT;
goto out;
}
/* Session is under flow control.
* Check if packet can be queued in session list, if not
* suspend net tx
*/
if (skb_queue_len(&session->ul_list) >=
(session->net_tx_stop ?
IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
(IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
ret = -EBUSY;
goto out;
}
/* Add skb to the uplink skb accumulator. */
skb_queue_tail(&session->ul_list, skb);
/* Inform the IPC kthread to pass uplink IP packets to CP. */
if (!ipc_mux->ev_mux_net_transmit_pending) {
ipc_mux->ev_mux_net_transmit_pending = true;
ret = ipc_task_queue_send_task(ipc_mux->imem,
ipc_mux_tq_ul_trigger_encode, 0,
NULL, 0, false);
if (ret)
goto out;
}
dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
skb->len, skb->truesize, skb->priority);
ret = 0;
out:
return ret;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include "iosm_ipc_mux_codec.h"
/* At the beginning of the runtime phase the IP MUX channel shall be created. */
static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
{
int channel_id;
channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
IPC_CTYPE_WWAN);
if (channel_id < 0) {
dev_err(ipc_mux->dev,
"allocation of the MUX channel id failed");
ipc_mux->state = MUX_S_ERROR;
ipc_mux->event = MUX_E_NOT_APPLICABLE;
goto no_channel;
}
/* Establish the MUX channel in blocking mode. */
ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
IPC_HP_NET_CHANNEL_INIT);
if (!ipc_mux->channel) {
dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
ipc_mux->state = MUX_S_ERROR;
ipc_mux->event = MUX_E_NOT_APPLICABLE;
return -ENODEV; /* MUX channel is not available. */
}
/* Define the MUX active state properties. */
ipc_mux->state = MUX_S_ACTIVE;
ipc_mux->event = MUX_E_NO_ORDERS;
no_channel:
return channel_id;
}
/* Reset the session/if id state. */
static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
{
struct mux_session *if_entry;
if_entry = &ipc_mux->session[if_id];
/* Reset the session state. */
if_entry->wwan = NULL;
}
/* Create and send the session open command. */
static struct mux_cmd_open_session_resp *
ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
{
struct mux_cmd_open_session_resp *open_session_resp;
struct mux_acb *acb = &ipc_mux->acb;
union mux_cmd_param param;
/* Add the open_session command to one ACB and start transmission. */
param.open_session.flow_ctrl = 0;
param.open_session.ipv4v6_hints = 0;
param.open_session.reserved2 = 0;
param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);
/* Finish and transfer ACB. The user thread is suspended.
* It is a blocking function call, until CP responds or timeout.
*/
acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
&param, sizeof(param.open_session), true,
false) ||
acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
if_id);
return NULL;
}
open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
dev_err(ipc_mux->dev,
"if_id %d,session open failed,response=%d", if_id,
open_session_resp->response);
return NULL;
}
return open_session_resp;
}
/* Open the first IP session. */
static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
struct mux_session_open *session_open)
{
struct mux_cmd_open_session_resp *open_session_resp;
int if_id;
/* Search for a free session interface id. */
if_id = le32_to_cpu(session_open->if_id);
if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
return false;
}
/* Create and send the session open command.
* It is a blocking function call, until CP responds or timeout.
*/
open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
if (!open_session_resp) {
ipc_mux_session_free(ipc_mux, if_id);
session_open->if_id = cpu_to_le32(-1);
return false;
}
/* Initialize the uplink skb accumulator. */
skb_queue_head_init(&ipc_mux->session[if_id].ul_list);
ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
ipc_mux->session[if_id].ul_head_pad_len =
le32_to_cpu(open_session_resp->ul_head_pad_len);
ipc_mux->session[if_id].wwan = ipc_mux->wwan;
/* Reset the flow ctrl stats of the session */
ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
ipc_mux->session[if_id].ul_flow_credits = 0;
ipc_mux->session[if_id].net_tx_stop = false;
ipc_mux->session[if_id].flow_ctl_mask = 0;
/* Save and return the assigned if id. */
session_open->if_id = cpu_to_le32(if_id);
ipc_mux->nr_sessions++;
return true;
}
/* Free pending session UL packet. */
static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
{
/* Reset the session/if id state. */
ipc_mux_session_free(ipc_mux, if_id);
/* Empty the uplink skb accumulator. */
skb_queue_purge(&ipc_mux->session[if_id].ul_list);
}
static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
struct mux_session_close *msg)
{
int if_id;
/* Copy the session interface id. */
if_id = le32_to_cpu(msg->if_id);
if (if_id < 0 || if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "invalid session id %d", if_id);
return;
}
/* Create and send the session close command.
* It is a blocking function call, until CP responds or timeout.
*/
if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
NULL, 0, true, false))
dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
if_id);
/* Reset the flow ctrl stats of the session */
ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
ipc_mux->session[if_id].flow_ctl_mask = 0;
ipc_mux_session_reset(ipc_mux, if_id);
ipc_mux->nr_sessions--;
}
static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
struct mux_channel_close *channel_close_p)
{
int i;
/* Free pending session UL packet. */
for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++)
if (ipc_mux->session[i].wwan)
ipc_mux_session_reset(ipc_mux, i);
ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);
/* Reset the MUX object. */
ipc_mux->state = MUX_S_INACTIVE;
ipc_mux->event = MUX_E_INACTIVE;
}
/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops. */
static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
{
enum mux_event order;
bool success;
int ret = -EIO;
if (!ipc_mux->initialized) {
ret = -EAGAIN;
goto out;
}
order = msg->common.event;
switch (ipc_mux->state) {
case MUX_S_INACTIVE:
if (order != MUX_E_MUX_SESSION_OPEN)
goto out; /* Wait for the request to open a session */
if (ipc_mux->event == MUX_E_INACTIVE)
/* Establish the MUX channel and the new state. */
ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);
if (ipc_mux->state != MUX_S_ACTIVE) {
ret = ipc_mux->channel_id; /* Missing the MUX channel */
goto out;
}
/* Disable the TD update timer and open the first IP session. */
ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
success = ipc_mux_session_open(ipc_mux, &msg->session_open);
ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
if (success)
ret = ipc_mux->channel_id;
goto out;
case MUX_S_ACTIVE:
switch (order) {
case MUX_E_MUX_SESSION_OPEN:
/* Disable the TD update timer and open a session */
ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
success = ipc_mux_session_open(ipc_mux,
&msg->session_open);
ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
if (success)
ret = ipc_mux->channel_id;
goto out;
case MUX_E_MUX_SESSION_CLOSE:
/* Release an IP session. */
ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
ipc_mux_session_close(ipc_mux, &msg->session_close);
if (!ipc_mux->nr_sessions) {
ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
ipc_mux_channel_close(ipc_mux,
&msg->channel_close);
}
ret = ipc_mux->channel_id;
goto out;
case MUX_E_MUX_CHANNEL_CLOSE:
/* Close the MUX channel pipes. */
ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
ipc_mux_channel_close(ipc_mux, &msg->channel_close);
ret = ipc_mux->channel_id;
goto out;
default:
/* Invalid order. */
goto out;
}
default:
dev_err(ipc_mux->dev,
"unexpected MUX transition: state=%d, event=%d",
ipc_mux->state, ipc_mux->event);
}
out:
return ret;
}
struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
struct iosm_imem *imem)
{
struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
int i, j, ul_tds, ul_td_size;
struct sk_buff_head *free_list;
struct sk_buff *skb;
int qlt_size;
if (!ipc_mux)
return NULL;
ipc_mux->protocol = mux_cfg->protocol;
ipc_mux->ul_flow = mux_cfg->ul_flow;
ipc_mux->instance_id = mux_cfg->instance_id;
ipc_mux->wwan_q_offset = 0;
ipc_mux->pcie = imem->pcie;
ipc_mux->imem = imem;
ipc_mux->ipc_protocol = imem->ipc_protocol;
ipc_mux->dev = imem->dev;
ipc_mux->wwan = imem->wwan;
/* Get the reference to the UL ADB list. */
free_list = &ipc_mux->ul_adb.free_list;
/* Initialize the list with free ADB. */
skb_queue_head_init(free_list);
ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;
ipc_mux->ul_adb.dest_skb = NULL;
ipc_mux->initialized = true;
ipc_mux->adb_prep_ongoing = false;
ipc_mux->size_needed = 0;
ipc_mux->ul_data_pend_bytes = 0;
ipc_mux->state = MUX_S_INACTIVE;
ipc_mux->ev_mux_net_transmit_pending = false;
ipc_mux->tx_transaction_id = 0;
ipc_mux->rr_next_session = 0;
ipc_mux->event = MUX_E_INACTIVE;
ipc_mux->channel_id = -1;
ipc_mux->channel = NULL;
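/* Aggregation mode pre-allocates one queue level table per IP session and
 * switches to the larger ADB buffer size and UL TD count below.
 */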
if (ipc_mux->protocol != MUX_LITE) {
qlt_size = offsetof(struct mux_qlth, ql) +
MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
GFP_ATOMIC);
if (!ipc_mux->ul_adb.pp_qlt[i]) {
for (j = i - 1; j >= 0; j--)
kfree(ipc_mux->ul_adb.pp_qlt[j]);
kfree(ipc_mux);
return NULL;
}
}
ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
}
/* Allocate the list of UL ADB. */
for (i = 0; i < ul_tds; i++) {
dma_addr_t mapping;
skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
&mapping, DMA_TO_DEVICE, 0);
if (!skb) {
ipc_mux_deinit(ipc_mux);
return NULL;
}
/* Extend the UL ADB list. */
skb_queue_tail(free_list, skb);
}
return ipc_mux;
}
/* Informs the network stack to restart transmission for all opened sessions
* if Flow Control is not ON for that session.
*/
static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
{
struct mux_session *session;
int idx;
for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
continue;
/* If flow control of the session is OFF and if there was tx
* stop then restart. Inform the network interface to restart
* sending data.
*/
if (session->flow_ctl_mask == 0) {
session->net_tx_stop = false;
ipc_mux_netif_tx_flowctrl(session, idx, false);
}
}
}
/* Informs the network stack to stop sending further packets for all opened
* sessions
*/
static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
{
struct mux_session *session;
int idx;
for (idx = 0; idx < IPC_MEM_MUX_IP_SESSION_ENTRIES; idx++) {
session = &ipc_mux->session[idx];
if (!session->wwan)
continue;
ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
}
}
void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
{
if (ipc_mux->ul_flow == MUX_UL) {
int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;
if (ipc_mux->ul_data_pend_bytes < low_thresh)
ipc_mux_restart_tx_for_all_sessions(ipc_mux);
}
}
int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
{
return ipc_mux ? IPC_MEM_MUX_IP_SESSION_ENTRIES : -EFAULT;
}
enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
{
return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
}
int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
{
struct mux_session_open *session_open;
union mux_msg mux_msg;
session_open = &mux_msg.session_open;
session_open->event = MUX_E_MUX_SESSION_OPEN;
session_open->if_id = cpu_to_le32(session_nr);
ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
return ipc_mux_schedule(ipc_mux, &mux_msg);
}
int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
{
struct mux_session_close *session_close;
union mux_msg mux_msg;
int ret_val;
session_close = &mux_msg.session_close;
session_close->event = MUX_E_MUX_SESSION_CLOSE;
session_close->if_id = cpu_to_le32(session_nr);
ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;
return ret_val;
}
void ipc_mux_deinit(struct iosm_mux *ipc_mux)
{
struct mux_channel_close *channel_close;
struct sk_buff_head *free_list;
union mux_msg mux_msg;
struct sk_buff *skb;
if (!ipc_mux->initialized)
return;
ipc_mux_stop_netif_for_all_sessions(ipc_mux);
if (ipc_mux->state == MUX_S_ACTIVE) {
channel_close = &mux_msg.channel_close;
channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
ipc_mux_schedule(ipc_mux, &mux_msg);
}
/* Empty the ADB free list. */
free_list = &ipc_mux->ul_adb.free_list;
/* Remove from the head of the downlink queue. */
while ((skb = skb_dequeue(free_list)))
ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
if (ipc_mux->channel) {
ipc_mux->channel->ul_pipe.is_open = false;
ipc_mux->channel->dl_pipe.is_open = false;
}
kfree(ipc_mux);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_mux.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/delay.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_task_queue.h"
/* Open a packet data online channel between the network layer and CP. */
int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
{
dev_dbg(ipc_imem->dev, "%s if id: %d",
ipc_imem_phase_get_string(ipc_imem->phase), if_id);
/* The network interface is only supported in the runtime phase. */
if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
ipc_imem_phase_get_string(ipc_imem->phase));
return -EIO;
}
return ipc_mux_open_session(ipc_imem->mux, if_id);
}
/* Release a net link to CP. */
void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
int channel_id)
{
if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
if_id <= IP_MUX_SESSION_END)
ipc_mux_close_session(ipc_imem->mux, if_id);
}
/* Tasklet call to do uplink transfer. */
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
ipc_imem_ul_send(ipc_imem);
return 0;
}
/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
NULL, 0, false);
}
/* Function to transfer UL data */
int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
int if_id, int channel_id, struct sk_buff *skb)
{
int ret = -EINVAL;
if (!ipc_imem || channel_id < 0)
goto out;
/* Is CP Running? */
if (ipc_imem->phase != IPC_P_RUN) {
dev_dbg(ipc_imem->dev, "phase %s transmit",
ipc_imem_phase_get_string(ipc_imem->phase));
ret = -EIO;
goto out;
}
/* Route the UL packet through IP MUX Layer */
ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
return ret;
}
/* Initialize wwan channel */
int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
enum ipc_mux_protocol mux_type)
{
struct ipc_chnl_cfg chnl_cfg = { 0 };
ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
/* If modem version is invalid (0xffffffff), do not initialize WWAN. */
if (ipc_imem->cp_version == -1) {
dev_err(ipc_imem->dev, "invalid CP version");
return -EIO;
}
ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
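/* In aggregation mode, override the defaults from ipc_chnl_cfg_get() for the
 * first IP channel with the aggregation-specific TD counts and DL buffer size.
 */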
if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION &&
ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) {
chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL;
chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE;
}
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
IRQ_MOD_OFF);
/* WWAN registration. */
ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
if (!ipc_imem->wwan) {
dev_err(ipc_imem->dev,
"failed to register the ipc_wwan interfaces");
return -ENOMEM;
}
return 0;
}
/* Map SKB to DMA for transfer */
static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
struct sk_buff *skb)
{
struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
char *buf = skb->data;
int len = skb->len;
dma_addr_t mapping;
int ret;
ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
if (ret)
goto err;
BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
IPC_CB(skb)->mapping = mapping;
IPC_CB(skb)->direction = DMA_TO_DEVICE;
IPC_CB(skb)->len = len;
IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
err:
return ret;
}
/* return true if channel is ready for use */
static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
struct ipc_mem_channel *channel)
{
enum ipc_phase phase;
/* Update the current operation phase. */
phase = ipc_imem->phase;
/* Select the operation depending on the execution stage. */
switch (phase) {
case IPC_P_RUN:
case IPC_P_PSI:
case IPC_P_EBL:
break;
case IPC_P_ROM:
/* Prepare the PSI image for the CP ROM driver and
* suspend the flash app.
*/
if (channel->state != IMEM_CHANNEL_RESERVED) {
dev_err(ipc_imem->dev,
"ch[%d]:invalid channel state %d,expected %d",
channel->channel_id, channel->state,
IMEM_CHANNEL_RESERVED);
goto channel_unavailable;
}
goto channel_available;
default:
/* Ignore uplink actions in all other phases. */
dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
channel->channel_id, phase);
goto channel_unavailable;
}
/* Check the full availability of the channel. */
if (channel->state != IMEM_CHANNEL_ACTIVE) {
dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
channel->channel_id, channel->state);
goto channel_unavailable;
}
channel_available:
return true;
channel_unavailable:
return false;
}
/**
* ipc_imem_sys_port_close - Release a sio link to CP.
* @ipc_imem: Imem instance.
* @channel: Channel instance.
*/
void ipc_imem_sys_port_close(struct iosm_imem *ipc_imem,
struct ipc_mem_channel *channel)
{
enum ipc_phase curr_phase;
int status = 0;
u32 tail = 0;
curr_phase = ipc_imem->phase;
/* If the current phase is IPC_P_OFF or the SIO ID is negative, then the
* channel is already freed. Nothing to do.
*/
if (curr_phase == IPC_P_OFF) {
dev_err(ipc_imem->dev,
"nothing to do. Current Phase: %s",
ipc_imem_phase_get_string(curr_phase));
return;
}
if (channel->state == IMEM_CHANNEL_FREE) {
dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
channel->channel_id, channel->state);
return;
}
/* If there are any pending TDs then wait for Timeout/Completion before
* closing pipe.
*/
if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
ipc_imem->app_notify_ul_pend = 1;
/* Suspend the user app and wait a certain time for processing
* UL Data.
*/
status = wait_for_completion_interruptible_timeout
(&ipc_imem->ul_pend_sem,
msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
if (status == 0) {
dev_dbg(ipc_imem->dev,
"Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
channel->ul_pipe.pipe_nr,
channel->ul_pipe.old_head,
channel->ul_pipe.old_tail);
}
ipc_imem->app_notify_ul_pend = 0;
}
/* If there are any pending TDs then wait for Timeout/Completion before
* closing pipe.
*/
ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
&channel->dl_pipe, NULL, &tail);
if (tail != channel->dl_pipe.old_tail) {
ipc_imem->app_notify_dl_pend = 1;
/* Suspend the user app and wait a certain time for processing
* DL Data.
*/
status = wait_for_completion_interruptible_timeout
(&ipc_imem->dl_pend_sem,
msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
if (status == 0) {
dev_dbg(ipc_imem->dev,
"Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
channel->dl_pipe.pipe_nr,
channel->dl_pipe.old_head,
channel->dl_pipe.old_tail);
}
ipc_imem->app_notify_dl_pend = 0;
}
/* Due to the wait for completion in messages, there is a small window
* between closing the pipe and marking the channel as closed. In this
* small window there could be an HP update from the Host Driver. Hence
* update the channel state to CLOSING to avoid an unnecessary interrupt
* towards CP.
*/
channel->state = IMEM_CHANNEL_CLOSING;
ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
ipc_imem_channel_free(channel);
}
/* Open a PORT link to CP and return the channel */
struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
int chl_id, int hp_id)
{
struct ipc_mem_channel *channel;
int ch_id;
/* The PORT interface is only supported in the runtime phase. */
if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
dev_err(ipc_imem->dev, "PORT open refused, phase %s",
ipc_imem_phase_get_string(ipc_imem->phase));
return NULL;
}
ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
if (ch_id < 0) {
dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
return NULL;
}
channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
if (!channel) {
dev_err(ipc_imem->dev, "PORT channel id open failed");
return NULL;
}
return channel;
}
/* transfer skb to modem */
int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
{
struct ipc_mem_channel *channel = ipc_cdev->channel;
struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
int ret = -EIO;
if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
ipc_imem->phase == IPC_P_OFF_REQ)
goto out;
ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
if (ret)
goto out;
/* Add skb to the uplink skbuf accumulator. */
skb_queue_tail(&channel->ul_list, skb);
ret = ipc_imem_call_cdev_write(ipc_imem);
if (ret) {
skb_dequeue_tail(&channel->ul_list);
dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
ipc_cdev->channel->channel_id);
}
out:
return ret;
}
/* Open a SIO link to CP and return the channel instance */
struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
enum ipc_phase phase;
int channel_id;
phase = ipc_imem_phase_update(ipc_imem);
switch (phase) {
case IPC_P_OFF:
case IPC_P_ROM:
/* Get a channel id as flash id and reserve it. */
channel_id = ipc_imem_channel_alloc(ipc_imem,
IPC_MEM_CTRL_CHL_ID_7,
IPC_CTYPE_CTRL);
if (channel_id < 0) {
dev_err(ipc_imem->dev,
"reservation of a flash channel id failed");
goto error;
}
ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
channel = &ipc_imem->channels[channel_id];
/* Enqueue chip info data to be read */
if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
dev_err(ipc_imem->dev, "Enqueue of chip info failed");
channel->state = IMEM_CHANNEL_FREE;
goto error;
}
return channel;
case IPC_P_PSI:
case IPC_P_EBL:
ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
if (ipc_imem->cp_version == -1) {
dev_err(ipc_imem->dev, "invalid CP version");
goto error;
}
channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
return ipc_imem_channel_open(ipc_imem, channel_id,
IPC_HP_CDEV_OPEN);
default:
/* CP is in the wrong state (e.g. CRASH or CD_READY) */
dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
}
error:
return NULL;
}
/* Release a SIO channel link to CP. */
void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
{
struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
enum ipc_mem_exec_stage exec_stage;
struct ipc_mem_channel *channel;
int status = 0;
u32 tail = 0;
channel = ipc_imem->ipc_devlink->devlink_sio.channel;
/* Wait up to boot_check_timeout for CP to reach the RUN or PSI stage */
do {
exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
exec_stage == IPC_MEM_EXEC_STAGE_PSI)
break;
msleep(20);
boot_check_timeout -= 20;
} while (boot_check_timeout > 0);
/* If there are any pending TDs then wait for Timeout/Completion before
* closing pipe.
*/
if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
status = wait_for_completion_interruptible_timeout
(&ipc_imem->ul_pend_sem,
msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
if (status == 0) {
dev_dbg(ipc_imem->dev,
"Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
channel->ul_pipe.pipe_nr,
channel->ul_pipe.old_head,
channel->ul_pipe.old_tail);
}
}
ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
&channel->dl_pipe, NULL, &tail);
if (tail != channel->dl_pipe.old_tail) {
status = wait_for_completion_interruptible_timeout
(&ipc_imem->dl_pend_sem,
msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
if (status == 0) {
dev_dbg(ipc_imem->dev,
"Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
channel->dl_pipe.pipe_nr,
channel->dl_pipe.old_head,
channel->dl_pipe.old_tail);
}
}
/* Due to the wait for completion in messages, there is a small window
* between closing the pipe and marking the channel as closed. In this
* small window there could be an HP update from the Host Driver. Hence
* update the channel state to CLOSING to avoid an unnecessary interrupt
* towards CP.
*/
channel->state = IMEM_CHANNEL_CLOSING;
/* Release the pipe resources */
ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
ipc_imem->nr_of_channels--;
}
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
struct sk_buff *skb)
{
skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
complete(&ipc_devlink->devlink_sio.read_sem);
}
/* PSI transfer */
static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
struct ipc_mem_channel *channel,
unsigned char *buf, int count)
{
int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
enum ipc_mem_exec_stage exec_stage;
dma_addr_t mapping = 0;
int ret;
ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
DMA_TO_DEVICE);
if (ret)
goto pcie_addr_map_fail;
/* Save the PSI information for the CP ROM driver on the doorbell
* scratchpad.
*/
ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
ret = wait_for_completion_interruptible_timeout
(&channel->ul_sem,
msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
if (ret <= 0) {
dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
ret);
goto psi_transfer_fail;
}
/* If the PSI download fails, return the CP boot ROM exit code */
if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
ret = (-1) * ((int)ipc_imem->rom_exit_code);
goto psi_transfer_fail;
}
dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
/* Wait psi_start_timeout milliseconds until the CP PSI image is
* running and updates the execution_stage field with
* IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
*/
do {
exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
break;
msleep(20);
psi_start_timeout -= 20;
} while (psi_start_timeout > 0);
if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
goto psi_transfer_fail; /* Unknown status of CP PSI process. */
ipc_imem->phase = IPC_P_PSI;
/* Enter the PSI phase. */
dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
/* Request the RUNNING state from CP and wait until it was reached
* or timeout.
*/
ipc_imem_ipc_init_check(ipc_imem);
ret = wait_for_completion_interruptible_timeout
(&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
if (ret <= 0) {
dev_err(ipc_imem->dev,
"Failed PSI RUNNING state on CP, Error-%d", ret);
goto psi_transfer_fail;
}
if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
IPC_MEM_DEVICE_IPC_RUNNING) {
dev_err(ipc_imem->dev,
"ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
channel->channel_id,
ipc_imem_phase_get_string(ipc_imem->phase),
ipc_mmio_get_ipc_state(ipc_imem->mmio));
goto psi_transfer_fail;
}
/* Create the flash channel for the transfer of the images. */
if (!ipc_imem_sys_devlink_open(ipc_imem)) {
dev_err(ipc_imem->dev, "can't open flash_channel");
goto psi_transfer_fail;
}
ret = 0;
psi_transfer_fail:
ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
pcie_addr_map_fail:
return ret;
}
int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
unsigned char *buf, int count)
{
struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
struct ipc_mem_channel *channel;
struct sk_buff *skb;
dma_addr_t mapping;
int ret;
channel = ipc_imem->ipc_devlink->devlink_sio.channel;
/* In the ROM phase the PSI image is passed to CP via a specific
* shared memory area and the doorbell scratchpad directly.
*/
if (ipc_imem->phase == IPC_P_ROM) {
ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
/* If the PSI transfer fails then send crash
* Signature.
*/
if (ret > 0)
ipc_imem_msg_send_feature_set(ipc_imem,
IPC_MEM_INBAND_CRASH_SIG,
false);
goto out;
}
/* Allocate skb memory for the uplink buffer. */
skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
DMA_TO_DEVICE, 0);
if (!skb) {
ret = -ENOMEM;
goto out;
}
skb_put_data(skb, buf, count);
IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
/* Add skb to the uplink skbuf accumulator. */
skb_queue_tail(&channel->ul_list, skb);
/* Inform the IPC tasklet to pass uplink IP packets to CP. */
if (!ipc_imem_call_cdev_write(ipc_imem)) {
ret = wait_for_completion_interruptible(&channel->ul_sem);
if (ret < 0) {
dev_err(ipc_imem->dev,
"ch[%d] no CP confirmation, status = %d",
channel->channel_id, ret);
ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
goto out;
}
}
ret = 0;
out:
return ret;
}
int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
u32 bytes_to_read, u32 *bytes_read)
{
struct sk_buff *skb = NULL;
int rc = 0;
/* check skb is available in rx_list or wait for skb */
devlink->devlink_sio.devlink_read_pend = 1;
while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
if (!wait_for_completion_interruptible_timeout
(&devlink->devlink_sio.read_sem,
msecs_to_jiffies(IPC_READ_TIMEOUT))) {
dev_err(devlink->dev, "Read timedout");
rc = -ETIMEDOUT;
goto devlink_read_fail;
}
}
devlink->devlink_sio.devlink_read_pend = 0;
if (bytes_to_read < skb->len) {
dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
rc = -EINVAL;
goto devlink_read_fail;
}
*bytes_read = skb->len;
memcpy(data, skb->data, skb->len);
devlink_read_fail:
dev_kfree_skb(skb);
return rc;
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-2021 Intel Corporation.
*/
#include <linux/vmalloc.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_coredump.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
/* Coredump list */
static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = {
{"report.json", REPORT_JSON_SIZE,},
{"coredump.fcd", COREDUMP_FCD_SIZE,},
{"cdd.log", CDD_LOG_SIZE,},
{"eeprom.bin", EEPROM_BIN_SIZE,},
{"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,},
{"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,},
};
/* Get the param values for the specific param ID's */
static int ipc_devlink_get_param(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
ctx->val.vu8 = ipc_devlink->param.erase_full_flash;
return 0;
}
/* Set the param values for the specific param ID's */
static int ipc_devlink_set_param(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
ipc_devlink->param.erase_full_flash = ctx->val.vu8;
return 0;
}
/* Devlink param structure array */
static const struct devlink_param iosm_devlink_params[] = {
DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
"erase_full_flash", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ipc_devlink_get_param, ipc_devlink_set_param,
NULL),
};
/* Get devlink flash component type */
static enum iosm_flash_comp_type
ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len)
{
enum iosm_flash_comp_type fls_type;
if (!strncmp("PSI", comp_str, len))
fls_type = FLASH_COMP_TYPE_PSI;
else if (!strncmp("EBL", comp_str, len))
fls_type = FLASH_COMP_TYPE_EBL;
else if (!strncmp("FLS", comp_str, len))
fls_type = FLASH_COMP_TYPE_FLS;
else
fls_type = FLASH_COMP_TYPE_INVAL;
return fls_type;
}
/* Function triggered on devlink flash command
* Flash update function which calls multiple functions based on
* component type specified in the flash command
*/
static int ipc_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct iosm_devlink *ipc_devlink = devlink_priv(devlink);
enum iosm_flash_comp_type fls_type;
struct iosm_devlink_image *header;
int rc = -EINVAL;
u8 *mdm_rsp;
header = (struct iosm_devlink_image *)params->fw->data;
if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE ||
(memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0))
return -EINVAL;
mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL);
if (!mdm_rsp)
return -ENOMEM;
fls_type = ipc_devlink_get_flash_comp_type(header->image_type,
IOSM_DEVLINK_MAX_IMG_LEN);
switch (fls_type) {
case FLASH_COMP_TYPE_PSI:
rc = ipc_flash_boot_psi(ipc_devlink, params->fw);
break;
case FLASH_COMP_TYPE_EBL:
rc = ipc_flash_boot_ebl(ipc_devlink, params->fw);
if (rc)
break;
rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp);
if (rc)
break;
rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp);
break;
case FLASH_COMP_TYPE_FLS:
rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp);
break;
default:
devlink_flash_update_status_notify(devlink, "Invalid component",
NULL, 0, 0);
break;
}
if (!rc)
devlink_flash_update_status_notify(devlink, "Flashing success",
header->image_type, 0, 0);
else
devlink_flash_update_status_notify(devlink, "Flashing failed",
header->image_type, 0, 0);
kfree(mdm_rsp);
return rc;
}
/* Call back function for devlink ops */
static const struct devlink_ops devlink_flash_ops = {
.flash_update = ipc_devlink_flash_update,
};
/**
* ipc_devlink_send_cmd - Send command to Modem
* @ipc_devlink: Pointer to struct iosm_devlink
* @cmd: Command to be sent to modem
* @entry: Command entry number
*
* Returns: 0 on success and failure value on error
*/
int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry)
{
struct iosm_rpsi_cmd rpsi_cmd;
rpsi_cmd.param.dword = cpu_to_le32(entry);
rpsi_cmd.cmd = cpu_to_le16(cmd);
rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^
rpsi_cmd.cmd;
return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd,
sizeof(rpsi_cmd));
}
/* Function to create snapshot */
static int ipc_devlink_coredump_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
u8 **data)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
struct iosm_coredump_file_info *cd_list = ops->priv;
u32 region_size;
int rc;
dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name,
cd_list->entry);
region_size = cd_list->default_size;
rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry,
region_size);
if (rc) {
dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc);
goto coredump_collect_err;
}
/* Send coredump end cmd indicating end of coredump collection */
if (cd_list->entry == (IOSM_NOF_CD_REGION - 1))
ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
return 0;
coredump_collect_err:
ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
return rc;
}
/* To create regions for coredump files */
static int ipc_devlink_create_region(struct iosm_devlink *devlink)
{
struct devlink_region_ops *mdm_coredump;
int rc = 0;
int i;
mdm_coredump = devlink->iosm_devlink_mdm_coredump;
for (i = 0; i < IOSM_NOF_CD_REGION; i++) {
mdm_coredump[i].name = list[i].filename;
mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot;
mdm_coredump[i].destructor = vfree;
devlink->cd_regions[i] =
devlink_region_create(devlink->devlink_ctx,
&mdm_coredump[i], MAX_SNAPSHOTS,
list[i].default_size);
if (IS_ERR(devlink->cd_regions[i])) {
rc = PTR_ERR(devlink->cd_regions[i]);
dev_err(devlink->dev, "Devlink region fail,err %d", rc);
/* Delete previously created regions */
for ( ; i >= 0; i--)
devlink_region_destroy(devlink->cd_regions[i]);
goto region_create_fail;
}
list[i].entry = i;
mdm_coredump[i].priv = list + i;
}
region_create_fail:
return rc;
}
/* To Destroy devlink regions */
static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink)
{
u8 i;
for (i = 0; i < IOSM_NOF_CD_REGION; i++)
devlink_region_destroy(ipc_devlink->cd_regions[i]);
}
/**
* ipc_devlink_init - Initialize/register devlink to IOSM driver
* @ipc_imem: Pointer to struct iosm_imem
*
* Returns: Pointer to iosm_devlink on success and NULL on failure
*/
struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
{
struct ipc_chnl_cfg chnl_cfg_flash = { 0 };
struct iosm_devlink *ipc_devlink;
struct devlink *devlink_ctx;
int rc;
devlink_ctx = devlink_alloc(&devlink_flash_ops,
sizeof(struct iosm_devlink),
ipc_imem->dev);
if (!devlink_ctx) {
dev_err(ipc_imem->dev, "devlink_alloc failed");
goto devlink_alloc_fail;
}
ipc_devlink = devlink_priv(devlink_ctx);
ipc_devlink->devlink_ctx = devlink_ctx;
ipc_devlink->pcie = ipc_imem->pcie;
ipc_devlink->dev = ipc_imem->dev;
rc = devlink_params_register(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
if (rc) {
dev_err(ipc_devlink->dev,
"devlink_params_register failed. rc %d", rc);
goto param_reg_fail;
}
ipc_devlink->cd_file_info = list;
rc = ipc_devlink_create_region(ipc_devlink);
if (rc) {
dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d",
rc);
goto region_create_fail;
}
if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0)
goto chnl_get_fail;
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_flash, IRQ_MOD_OFF);
init_completion(&ipc_devlink->devlink_sio.read_sem);
skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list);
devlink_register(devlink_ctx);
dev_dbg(ipc_devlink->dev, "iosm devlink register success");
return ipc_devlink;
chnl_get_fail:
ipc_devlink_destroy_region(ipc_devlink);
region_create_fail:
devlink_params_unregister(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
param_reg_fail:
devlink_free(devlink_ctx);
devlink_alloc_fail:
return NULL;
}
/**
* ipc_devlink_deinit - To uninitialize the devlink from the IOSM driver.
* @ipc_devlink: Devlink instance
*/
void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
{
struct devlink *devlink_ctx = ipc_devlink->devlink_ctx;
devlink_unregister(devlink_ctx);
ipc_devlink_destroy_region(ipc_devlink);
devlink_params_unregister(devlink_ctx, iosm_devlink_params,
ARRAY_SIZE(iosm_devlink_params));
if (ipc_devlink->devlink_sio.devlink_read_pend) {
complete(&ipc_devlink->devlink_sio.read_sem);
complete(&ipc_devlink->devlink_sio.channel->ul_sem);
}
if (!ipc_devlink->devlink_sio.devlink_read_pend)
skb_queue_purge(&ipc_devlink->devlink_sio.rx_list);
ipc_imem_sys_devlink_close(ipc_devlink);
devlink_free(devlink_ctx);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_devlink.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020-21 Intel Corporation.
*/
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include "iosm_ipc_uevent.h"
/* Update the uevent in work queue context */
static void ipc_uevent_work(struct work_struct *data)
{
struct ipc_uevent_info *info;
char *envp[2] = { NULL, NULL };
info = container_of(data, struct ipc_uevent_info, work);
envp[0] = info->uevent;
if (kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp))
pr_err("uevent %s failed to sent", info->uevent);
kfree(info);
}
void ipc_uevent_send(struct device *dev, char *uevent)
{
struct ipc_uevent_info *info = kzalloc(sizeof(*info), GFP_ATOMIC);
if (!info)
return;
/* Initialize the kernel work item */
INIT_WORK(&info->work, ipc_uevent_work);
/* Store the device and event information */
info->dev = dev;
snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
/* Schedule uevent in process context using work queue */
schedule_work(&info->work);
}
|
linux-master
|
drivers/net/wwan/iosm/iosm_ipc_uevent.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Sreehari Kancharla <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Ricardo Martinez <[email protected]>
*/
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#define T7XX_PCIE_REG_BAR 2
#define T7XX_PCIE_REG_PORT ATR_SRC_PCI_WIN0
#define T7XX_PCIE_REG_TABLE_NUM 0
#define T7XX_PCIE_REG_TRSL_PORT ATR_DST_AXIM_0
#define T7XX_PCIE_DEV_DMA_PORT_START ATR_SRC_AXIS_0
#define T7XX_PCIE_DEV_DMA_PORT_END ATR_SRC_AXIS_2
#define T7XX_PCIE_DEV_DMA_TABLE_NUM 0
#define T7XX_PCIE_DEV_DMA_TRSL_ADDR 0
#define T7XX_PCIE_DEV_DMA_SRC_ADDR 0
#define T7XX_PCIE_DEV_DMA_TRANSPARENT 1
#define T7XX_PCIE_DEV_DMA_SIZE 0
enum t7xx_atr_src_port {
ATR_SRC_PCI_WIN0,
ATR_SRC_PCI_WIN1,
ATR_SRC_AXIS_0,
ATR_SRC_AXIS_1,
ATR_SRC_AXIS_2,
ATR_SRC_AXIS_3,
};
enum t7xx_atr_dst_port {
ATR_DST_PCI_TRX,
ATR_DST_PCI_CONFIG,
ATR_DST_AXIM_0 = 4,
ATR_DST_AXIM_1,
ATR_DST_AXIM_2,
ATR_DST_AXIM_3,
};
struct t7xx_atr_config {
u64 src_addr;
u64 trsl_addr;
u64 size;
u32 port;
u32 table;
enum t7xx_atr_dst_port trsl_id;
u32 transparent;
};
static void t7xx_pcie_mac_atr_tables_dis(void __iomem *pbase, enum t7xx_atr_src_port port)
{
void __iomem *reg;
int i, offset;
for (i = 0; i < ATR_TABLE_NUM_PER_ATR; i++) {
offset = ATR_PORT_OFFSET * port + ATR_TABLE_OFFSET * i;
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
iowrite64(0, reg);
}
}
static int t7xx_pcie_mac_atr_cfg(struct t7xx_pci_dev *t7xx_dev, struct t7xx_atr_config *cfg)
{
struct device *dev = &t7xx_dev->pdev->dev;
void __iomem *pbase = IREG_BASE(t7xx_dev);
int atr_size, pos, offset;
void __iomem *reg;
u64 value;
if (cfg->transparent) {
/* No address conversion is performed */
atr_size = ATR_TRANSPARENT_SIZE;
} else {
if (cfg->src_addr & (cfg->size - 1)) {
dev_err(dev, "Source address is not aligned to size\n");
return -EINVAL;
}
if (cfg->trsl_addr & (cfg->size - 1)) {
dev_err(dev, "Translation address %llx is not aligned to size %llx\n",
cfg->trsl_addr, cfg->size - 1);
return -EINVAL;
}
pos = __ffs64(cfg->size);
/* HW calculates the address translation space as 2^(atr_size + 1) */
atr_size = pos - 1;
}
offset = ATR_PORT_OFFSET * cfg->port + ATR_TABLE_OFFSET * cfg->table;
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_ADDR + offset;
value = cfg->trsl_addr & ATR_PCIE_WIN0_ADDR_ALGMT;
iowrite64(value, reg);
reg = pbase + ATR_PCIE_WIN0_T0_TRSL_PARAM + offset;
iowrite32(cfg->trsl_id, reg);
reg = pbase + ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR + offset;
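/* Pack the aligned source address with the window size exponent (shifted past
 * bit 0). BIT(0) presumably acts as the entry enable, which would match
 * t7xx_pcie_mac_atr_tables_dis() clearing the whole register to disable it.
 */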
value = (cfg->src_addr & ATR_PCIE_WIN0_ADDR_ALGMT) | (atr_size << 1) | BIT(0);
iowrite64(value, reg);
/* Ensure ATR is set */
ioread64(reg);
return 0;
}
/**
* t7xx_pcie_mac_atr_init() - Initialize address translation.
* @t7xx_dev: MTK device.
*
* Setup ATR for ports & device.
*/
void t7xx_pcie_mac_atr_init(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_atr_config cfg;
u32 i;
/* Disable for all ports */
for (i = ATR_SRC_PCI_WIN0; i <= ATR_SRC_AXIS_3; i++)
t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), i);
memset(&cfg, 0, sizeof(cfg));
/* Config ATR for RC to access device's register */
cfg.src_addr = pci_resource_start(t7xx_dev->pdev, T7XX_PCIE_REG_BAR);
cfg.size = T7XX_PCIE_REG_SIZE_CHIP;
cfg.trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
cfg.port = T7XX_PCIE_REG_PORT;
cfg.table = T7XX_PCIE_REG_TABLE_NUM;
cfg.trsl_id = T7XX_PCIE_REG_TRSL_PORT;
t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr = T7XX_PCIE_REG_TRSL_ADDR_CHIP;
/* Config ATR for EP to access RC's memory */
for (i = T7XX_PCIE_DEV_DMA_PORT_START; i <= T7XX_PCIE_DEV_DMA_PORT_END; i++) {
cfg.src_addr = T7XX_PCIE_DEV_DMA_SRC_ADDR;
cfg.size = T7XX_PCIE_DEV_DMA_SIZE;
cfg.trsl_addr = T7XX_PCIE_DEV_DMA_TRSL_ADDR;
cfg.port = i;
cfg.table = T7XX_PCIE_DEV_DMA_TABLE_NUM;
cfg.trsl_id = ATR_DST_PCI_TRX;
cfg.transparent = T7XX_PCIE_DEV_DMA_TRANSPARENT;
t7xx_pcie_mac_atr_tables_dis(IREG_BASE(t7xx_dev), cfg.port);
t7xx_pcie_mac_atr_cfg(t7xx_dev, &cfg);
}
}
/**
* t7xx_pcie_mac_enable_disable_int() - Enable/disable interrupts.
* @t7xx_dev: MTK device.
* @enable: Enable/disable.
*
* Enable or disable device interrupts.
*/
static void t7xx_pcie_mac_enable_disable_int(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
u32 value;
value = ioread32(IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
if (enable)
value &= ~ISTAT_HST_CTRL_DIS;
else
value |= ISTAT_HST_CTRL_DIS;
iowrite32(value, IREG_BASE(t7xx_dev) + ISTAT_HST_CTRL);
}
void t7xx_pcie_mac_interrupts_en(struct t7xx_pci_dev *t7xx_dev)
{
t7xx_pcie_mac_enable_disable_int(t7xx_dev, true);
}
void t7xx_pcie_mac_interrupts_dis(struct t7xx_pci_dev *t7xx_dev)
{
t7xx_pcie_mac_enable_disable_int(t7xx_dev, false);
}
/**
* t7xx_pcie_mac_clear_set_int() - Clear/set interrupt by type.
* @t7xx_dev: MTK device.
* @int_type: Interrupt type.
* @clear: Clear/set.
*
* Clear or set device interrupt by type.
*/
static void t7xx_pcie_mac_clear_set_int(struct t7xx_pci_dev *t7xx_dev,
enum t7xx_int int_type, bool clear)
{
void __iomem *reg;
u32 val;
if (clear)
reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0;
else
reg = IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_SET_GRP0_0;
val = BIT(EXT_INT_START + int_type);
iowrite32(val, reg);
}
void t7xx_pcie_mac_clear_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, true);
}
void t7xx_pcie_mac_set_int(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
t7xx_pcie_mac_clear_set_int(t7xx_dev, int_type, false);
}
/**
* t7xx_pcie_mac_clear_int_status() - Clear interrupt status by type.
* @t7xx_dev: MTK device.
* @int_type: Interrupt type.
*
* Clear the device interrupt status by type.
*/
void t7xx_pcie_mac_clear_int_status(struct t7xx_pci_dev *t7xx_dev, enum t7xx_int int_type)
{
void __iomem *reg = IREG_BASE(t7xx_dev) + MSIX_ISTAT_HST_GRP0_0;
u32 val = BIT(EXT_INT_START + int_type);
iowrite32(val, reg);
}
/**
* t7xx_pcie_set_mac_msix_cfg() - Write MSIX control configuration.
* @t7xx_dev: MTK device.
* @irq_count: Number of MSIX IRQ vectors.
*
* Write IRQ count to device.
*/
void t7xx_pcie_set_mac_msix_cfg(struct t7xx_pci_dev *t7xx_dev, unsigned int irq_count)
{
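/* ffs() returns the 1-based index of the lowest set bit, so for a
 * power-of-two irq_count this evaluates to 2 * log2(irq_count) + 1;
 * the exact register encoding is assumed from this formula.
 */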
u32 val = ffs(irq_count) * 2 - 1;
iowrite32(val, IREG_BASE(t7xx_dev) + T7XX_PCIE_CFG_MSIX);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_pcie_mac.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Ricardo Martinez <[email protected]>
* Sreehari Kancharla <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
#define T7XX_INIT_TIMEOUT 20
#define PM_SLEEP_DIS_TIMEOUT_MS 20
#define PM_ACK_TIMEOUT_MS 1500
#define PM_AUTOSUSPEND_MS 20000
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
#define PM_RESOURCE_POLL_STEP_US 100
enum t7xx_pm_state {
MTK_PM_EXCEPTION,
MTK_PM_INIT, /* Device initialized, but handshake not completed */
MTK_PM_SUSPENDED,
MTK_PM_RESUMED,
};
static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
{
void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
u32 value;
value = ioread32(ctrl_reg);
if (enable)
value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
else
value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;
iowrite32(value, ctrl_reg);
}
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
int ret, val;
ret = read_poll_timeout(ioread32, val,
(val & T7XX_PCIE_RESOURCE_STS_MSK) == T7XX_PCIE_RESOURCE_STS_MSK,
PM_RESOURCE_POLL_STEP_US, PM_RESOURCE_POLL_TIMEOUT_US, true,
IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
if (ret == -ETIMEDOUT)
dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n");
return ret;
}
static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
{
struct pci_dev *pdev = t7xx_dev->pdev;
INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
mutex_init(&t7xx_dev->md_pm_entity_mtx);
spin_lock_init(&t7xx_dev->md_pm_lock);
init_completion(&t7xx_dev->sleep_lock_acquire);
init_completion(&t7xx_dev->pm_sr_ack);
init_completion(&t7xx_dev->init_done);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
device_init_wakeup(&pdev->dev, true);
dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags |
DPM_FLAG_NO_DIRECT_COMPLETE);
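/* Keep ASPM/L1 low power disabled during early init;
 * t7xx_pci_pm_init_late() re-enables it via ENABLE_ASPM_LOWPWR later.
 */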
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
pm_runtime_use_autosuspend(&pdev->dev);
return t7xx_wait_pm_config(t7xx_dev);
}
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
/* Enable the PCIe resource lock only after MD deep sleep is done */
t7xx_mhccif_mask_clr(t7xx_dev,
D2H_INT_DS_LOCK_ACK |
D2H_INT_SUSPEND_ACK |
D2H_INT_RESUME_ACK |
D2H_INT_SUSPEND_ACK_AP |
D2H_INT_RESUME_ACK_AP);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
complete_all(&t7xx_dev->init_done);
}
static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
{
/* The device is kept in the FSM re-init flow,
* so just roll the PM settings back to their init values.
*/
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
pm_runtime_get_noresume(&t7xx_dev->pdev->dev);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
return t7xx_wait_pm_config(t7xx_dev);
}
void t7xx_pci_pm_exp_detected(struct t7xx_pci_dev *t7xx_dev)
{
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
t7xx_wait_pm_config(t7xx_dev);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION);
}
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
struct md_pm_entity *entity;
mutex_lock(&t7xx_dev->md_pm_entity_mtx);
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (entity->id == pm_entity->id) {
mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
return -EEXIST;
}
}
list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities);
mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
return 0;
}
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity)
{
struct md_pm_entity *entity, *tmp_entity;
mutex_lock(&t7xx_dev->md_pm_entity_mtx);
list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) {
if (entity->id == pm_entity->id) {
list_del(&pm_entity->entity);
mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
return 0;
}
}
mutex_unlock(&t7xx_dev->md_pm_entity_mtx);
return -ENXIO;
}
int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
int ret;
ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
if (!ret)
dev_err_ratelimited(dev, "Resource wait complete timed out\n");
return ret;
}
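/* Note: wait_for_completion_timeout() returns 0 on timeout and the number of
* jiffies left otherwise, so callers treat a non-zero return from this helper
* as "deep sleep successfully locked".
*/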
/**
* t7xx_pci_disable_sleep() - Disable deep sleep capability.
* @t7xx_dev: MTK device.
*
* Lock the deep sleep capability; note that the device can still enter deep
* sleep while it is in D0 state from the host's point of view.
*
* If the device is in deep sleep, wake it up and disable the deep sleep capability.
*/
void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
unsigned long flags;
spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
t7xx_dev->sleep_disable_count++;
if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
goto unlock_and_complete;
if (t7xx_dev->sleep_disable_count == 1) {
u32 status;
reinit_completion(&t7xx_dev->sleep_lock_acquire);
t7xx_dev_set_sleep_capability(t7xx_dev, false);
status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
if (status & T7XX_PCIE_RESOURCE_STS_MSK)
goto unlock_and_complete;
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
}
spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
return;
unlock_and_complete:
spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
complete_all(&t7xx_dev->sleep_lock_acquire);
}
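/* Illustrative caller pattern (a sketch only, not copied verbatim from the
* HIF code):
*
*     t7xx_pci_disable_sleep(t7xx_dev);
*     if (t7xx_pci_sleep_disable_complete(t7xx_dev)) {
*             ... access modem registers / rings safely ...
*     }
*     t7xx_pci_enable_sleep(t7xx_dev);
*
* The disable/enable calls nest via sleep_disable_count: only the first
* disable requests H2D_CH_DS_LOCK and only the last enable re-allows deep
* sleep.
*/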
/**
* t7xx_pci_enable_sleep() - Enable deep sleep capability.
* @t7xx_dev: MTK device.
*
* After enabling deep sleep, the device can enter the deep sleep state.
*/
void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
{
unsigned long flags;
spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
t7xx_dev->sleep_disable_count--;
if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
goto unlock;
if (t7xx_dev->sleep_disable_count == 0)
t7xx_dev_set_sleep_capability(t7xx_dev, true);
unlock:
spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
}
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
unsigned long wait_ret;
reinit_completion(&t7xx_dev->pm_sr_ack);
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, request);
wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack,
msecs_to_jiffies(PM_ACK_TIMEOUT_MS));
if (!wait_ret)
return -ETIMEDOUT;
return 0;
}
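/* The ACK for the request above arrives over MHCCIF: the ISR thread in
* t7xx_mhccif.c completes pm_sr_ack when one of the D2H suspend/resume ACK
* bits is set, which is what releases the wait here.
*/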
static int __t7xx_pci_pm_suspend(struct pci_dev *pdev)
{
enum t7xx_pm_id entity_id = PM_ENTITY_ID_INVALID;
struct t7xx_pci_dev *t7xx_dev;
struct md_pm_entity *entity;
int ret;
t7xx_dev = pci_get_drvdata(pdev);
if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n");
return -EFAULT;
}
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
ret = t7xx_wait_pm_config(t7xx_dev);
if (ret) {
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
return ret;
}
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
t7xx_dev->rgu_pci_irq_en = false;
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (!entity->suspend)
continue;
ret = entity->suspend(t7xx_dev, entity->entity_param);
if (ret) {
entity_id = entity->id;
dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id);
goto abort_suspend;
}
}
ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ);
if (ret) {
dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret);
goto abort_suspend;
}
ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_SUSPEND_REQ_AP);
if (ret) {
t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret);
goto abort_suspend;
}
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (entity->suspend_late)
entity->suspend_late(t7xx_dev, entity->entity_param);
}
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
return 0;
abort_suspend:
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (entity_id == entity->id)
break;
if (entity->resume)
entity->resume(t7xx_dev, entity->entity_param);
}
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
return ret;
}
static void t7xx_pcie_interrupt_reinit(struct t7xx_pci_dev *t7xx_dev)
{
t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
/* Disable interrupts first and let the IPs enable them */
iowrite32(MSIX_MSK_SET_ALL, IREG_BASE(t7xx_dev) + IMASK_HOST_MSIX_CLR_GRP0_0);
/* The device disables PCIe interrupts during resume;
* the following call re-enables them.
*/
t7xx_pcie_mac_interrupts_en(t7xx_dev);
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
}
static int t7xx_pcie_reinit(struct t7xx_pci_dev *t7xx_dev, bool is_d3)
{
int ret;
ret = pcim_enable_device(t7xx_dev->pdev);
if (ret)
return ret;
t7xx_pcie_mac_atr_init(t7xx_dev);
t7xx_pcie_interrupt_reinit(t7xx_dev);
if (is_d3) {
t7xx_mhccif_init(t7xx_dev);
return t7xx_pci_pm_reinit(t7xx_dev);
}
return 0;
}
static int t7xx_send_fsm_command(struct t7xx_pci_dev *t7xx_dev, u32 event)
{
struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl;
struct device *dev = &t7xx_dev->pdev->dev;
int ret = -EINVAL;
switch (event) {
case FSM_CMD_STOP:
ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
break;
case FSM_CMD_START:
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
t7xx_dev->rgu_pci_irq_en = true;
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
ret = t7xx_fsm_append_cmd(fsm_ctl, FSM_CMD_START, 0);
break;
default:
break;
}
if (ret)
dev_err(dev, "Failure handling FSM command %u, %d\n", event, ret);
return ret;
}
static int __t7xx_pci_pm_resume(struct pci_dev *pdev, bool state_check)
{
struct t7xx_pci_dev *t7xx_dev;
struct md_pm_entity *entity;
u32 prev_state;
int ret = 0;
t7xx_dev = pci_get_drvdata(pdev);
if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) {
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
return 0;
}
t7xx_pcie_mac_interrupts_en(t7xx_dev);
prev_state = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_PM_RESUME_STATE);
if (state_check) {
/* For D3/L3 resume, the device could boot so quickly that the
* initial value of the dummy register might be overwritten.
* Identify new boots if the ATR source address register is not initialized.
*/
u32 atr_reg_val = ioread32(IREG_BASE(t7xx_dev) +
ATR_PCIE_WIN0_T0_ATR_PARAM_SRC_ADDR);
if (prev_state == PM_RESUME_REG_STATE_L3 ||
(prev_state == PM_RESUME_REG_STATE_INIT &&
atr_reg_val == ATR_SRC_ADDR_INVALID)) {
ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
if (ret)
return ret;
ret = t7xx_pcie_reinit(t7xx_dev, true);
if (ret)
return ret;
t7xx_clear_rgu_irq(t7xx_dev);
return t7xx_send_fsm_command(t7xx_dev, FSM_CMD_START);
}
if (prev_state == PM_RESUME_REG_STATE_EXP ||
prev_state == PM_RESUME_REG_STATE_L2_EXP) {
if (prev_state == PM_RESUME_REG_STATE_L2_EXP) {
ret = t7xx_pcie_reinit(t7xx_dev, false);
if (ret)
return ret;
}
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
t7xx_dev->rgu_pci_irq_en = true;
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
t7xx_mhccif_mask_clr(t7xx_dev,
D2H_INT_EXCEPTION_INIT |
D2H_INT_EXCEPTION_INIT_DONE |
D2H_INT_EXCEPTION_CLEARQ_DONE |
D2H_INT_EXCEPTION_ALLQ_RESET |
D2H_INT_PORT_ENUM);
return ret;
}
if (prev_state == PM_RESUME_REG_STATE_L2) {
ret = t7xx_pcie_reinit(t7xx_dev, false);
if (ret)
return ret;
} else if (prev_state != PM_RESUME_REG_STATE_L1 &&
prev_state != PM_RESUME_REG_STATE_INIT) {
ret = t7xx_send_fsm_command(t7xx_dev, FSM_CMD_STOP);
if (ret)
return ret;
t7xx_clear_rgu_irq(t7xx_dev);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED);
return 0;
}
}
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
t7xx_wait_pm_config(t7xx_dev);
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (entity->resume_early)
entity->resume_early(t7xx_dev, entity->entity_param);
}
ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ);
if (ret)
dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret);
ret = t7xx_send_pm_request(t7xx_dev, H2D_CH_RESUME_REQ_AP);
if (ret)
dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret);
list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) {
if (entity->resume) {
ret = entity->resume(t7xx_dev, entity->entity_param);
if (ret)
dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n",
entity->id, ret);
}
}
t7xx_dev->rgu_pci_irq_en = true;
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
pm_runtime_mark_last_busy(&pdev->dev);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
return ret;
}
static int t7xx_pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct t7xx_pci_dev *t7xx_dev;
t7xx_dev = pci_get_drvdata(pdev);
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
return 0;
}
static void t7xx_pci_shutdown(struct pci_dev *pdev)
{
__t7xx_pci_pm_suspend(pdev);
}
static int t7xx_pci_pm_prepare(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct t7xx_pci_dev *t7xx_dev;
t7xx_dev = pci_get_drvdata(pdev);
if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
dev_warn(dev, "Not ready for system sleep.\n");
return -ETIMEDOUT;
}
return 0;
}
static int t7xx_pci_pm_suspend(struct device *dev)
{
return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}
static int t7xx_pci_pm_resume(struct device *dev)
{
return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}
static int t7xx_pci_pm_thaw(struct device *dev)
{
return __t7xx_pci_pm_resume(to_pci_dev(dev), false);
}
static int t7xx_pci_pm_runtime_suspend(struct device *dev)
{
return __t7xx_pci_pm_suspend(to_pci_dev(dev));
}
static int t7xx_pci_pm_runtime_resume(struct device *dev)
{
return __t7xx_pci_pm_resume(to_pci_dev(dev), true);
}
static const struct dev_pm_ops t7xx_pci_pm_ops = {
.prepare = t7xx_pci_pm_prepare,
.suspend = t7xx_pci_pm_suspend,
.resume = t7xx_pci_pm_resume,
.resume_noirq = t7xx_pci_pm_resume_noirq,
.freeze = t7xx_pci_pm_suspend,
.thaw = t7xx_pci_pm_thaw,
.poweroff = t7xx_pci_pm_suspend,
.restore = t7xx_pci_pm_resume,
.restore_noirq = t7xx_pci_pm_resume_noirq,
.runtime_suspend = t7xx_pci_pm_runtime_suspend,
.runtime_resume = t7xx_pci_pm_runtime_resume
};
static int t7xx_request_irq(struct pci_dev *pdev)
{
struct t7xx_pci_dev *t7xx_dev;
int ret = 0, i;
t7xx_dev = pci_get_drvdata(pdev);
for (i = 0; i < EXT_INT_NUM; i++) {
const char *irq_descr;
int irq_vec;
if (!t7xx_dev->intr_handler[i])
continue;
irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d",
dev_driver_string(&pdev->dev), i);
if (!irq_descr) {
ret = -ENOMEM;
break;
}
irq_vec = pci_irq_vector(pdev, i);
ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i],
t7xx_dev->intr_thread[i], 0, irq_descr,
t7xx_dev->callback_param[i]);
if (ret) {
dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret);
break;
}
}
if (ret) {
while (i--) {
if (!t7xx_dev->intr_handler[i])
continue;
free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
}
}
return ret;
}
static int t7xx_setup_msix(struct t7xx_pci_dev *t7xx_dev)
{
struct pci_dev *pdev = t7xx_dev->pdev;
int ret;
/* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */
ret = pci_alloc_irq_vectors(pdev, EXT_INT_NUM, EXT_INT_NUM, PCI_IRQ_MSIX);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret);
return ret;
}
ret = t7xx_request_irq(pdev);
if (ret) {
pci_free_irq_vectors(pdev);
return ret;
}
t7xx_pcie_set_mac_msix_cfg(t7xx_dev, EXT_INT_NUM);
return 0;
}
static int t7xx_interrupt_init(struct t7xx_pci_dev *t7xx_dev)
{
int ret, i;
if (!t7xx_dev->pdev->msix_cap)
return -EINVAL;
ret = t7xx_setup_msix(t7xx_dev);
if (ret)
return ret;
/* IPs enable interrupts when ready */
for (i = 0; i < EXT_INT_NUM; i++)
t7xx_pcie_mac_set_int(t7xx_dev, i);
return 0;
}
static void t7xx_pci_infracfg_ao_calc(struct t7xx_pci_dev *t7xx_dev)
{
t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base +
INFRACFG_AO_DEV_CHIP -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
}
static int t7xx_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct t7xx_pci_dev *t7xx_dev;
int ret;
t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL);
if (!t7xx_dev)
return -ENOMEM;
pci_set_drvdata(pdev, t7xx_dev);
t7xx_dev->pdev = pdev;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
pci_set_master(pdev);
ret = pcim_iomap_regions(pdev, BIT(T7XX_PCI_IREG_BASE) | BIT(T7XX_PCI_EREG_BASE),
pci_name(pdev));
if (ret) {
dev_err(&pdev->dev, "Could not request BARs: %d\n", ret);
return -ENOMEM;
}
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret);
return ret;
}
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret);
return ret;
}
IREG_BASE(t7xx_dev) = pcim_iomap_table(pdev)[T7XX_PCI_IREG_BASE];
t7xx_dev->base_addr.pcie_ext_reg_base = pcim_iomap_table(pdev)[T7XX_PCI_EREG_BASE];
ret = t7xx_pci_pm_init(t7xx_dev);
if (ret)
return ret;
t7xx_pcie_mac_atr_init(t7xx_dev);
t7xx_pci_infracfg_ao_calc(t7xx_dev);
t7xx_mhccif_init(t7xx_dev);
ret = t7xx_md_init(t7xx_dev);
if (ret)
return ret;
t7xx_pcie_mac_interrupts_dis(t7xx_dev);
ret = t7xx_interrupt_init(t7xx_dev);
if (ret) {
t7xx_md_exit(t7xx_dev);
return ret;
}
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
t7xx_pcie_mac_interrupts_en(t7xx_dev);
return 0;
}
static void t7xx_pci_remove(struct pci_dev *pdev)
{
struct t7xx_pci_dev *t7xx_dev;
int i;
t7xx_dev = pci_get_drvdata(pdev);
t7xx_md_exit(t7xx_dev);
for (i = 0; i < EXT_INT_NUM; i++) {
if (!t7xx_dev->intr_handler[i])
continue;
free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]);
}
pci_free_irq_vectors(t7xx_dev->pdev);
}
static const struct pci_device_id t7xx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
static struct pci_driver t7xx_pci_driver = {
.name = "mtk_t7xx",
.id_table = t7xx_pci_table,
.probe = t7xx_pci_probe,
.remove = t7xx_pci_remove,
.driver.pm = &t7xx_pci_pm_ops,
.shutdown = t7xx_pci_shutdown,
};
module_pci_driver(t7xx_pci_driver);
MODULE_AUTHOR("MediaTek Inc");
MODULE_DESCRIPTION("MediaTek PCIe 5G WWAN modem T7xx driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/net/wwan/t7xx/t7xx_pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dev_printk.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include "t7xx_dpmaif.h"
#include "t7xx_reg.h"
#define ioread32_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(ioread32, addr, val, cond, delay_us, timeout_us)
static int t7xx_dpmaif_init_intr(struct dpmaif_hw_info *hw_info)
{
struct dpmaif_isr_en_mask *isr_en_msk = &hw_info->isr_en_mask;
u32 value, ul_intr_enable, dl_intr_enable;
int ret;
ul_intr_enable = DP_UL_INT_ERR_MSK | DP_UL_INT_QDONE_MSK;
isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
/* Set interrupt enable mask */
iowrite32(ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
/* Check mask status */
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
value, (value & ul_intr_enable) != ul_intr_enable, 0,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (ret)
return ret;
dl_intr_enable = DP_DL_INT_PITCNT_LEN_ERR | DP_DL_INT_BATCNT_LEN_ERR;
isr_en_msk->ap_dl_l2intr_err_en_msk = dl_intr_enable;
ul_intr_enable = DPMAIF_DL_INT_DLQ0_QDONE | DPMAIF_DL_INT_DLQ0_PITCNT_LEN |
DPMAIF_DL_INT_DLQ1_QDONE | DPMAIF_DL_INT_DLQ1_PITCNT_LEN;
isr_en_msk->ap_ul_l2intr_en_msk = ul_intr_enable;
iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
/* Set DL ISR PD enable mask */
iowrite32(~ul_intr_enable, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0,
value, (value & ul_intr_enable) != ul_intr_enable, 0,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (ret)
return ret;
isr_en_msk->ap_udl_ip_busy_en_msk = DPMAIF_UDL_IP_BUSY;
iowrite32(DPMAIF_AP_IP_BUSY_MASK, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
iowrite32(isr_en_msk->ap_udl_ip_busy_en_msk,
hw_info->pcie_base + DPMAIF_AO_AP_DLUL_IP_BUSY_MASK);
value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
value |= DPMAIF_DL_INT_Q2APTOP | DPMAIF_DL_INT_Q2TOQ1;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_AP_L1TIMR0);
iowrite32(DPMA_HPC_ALL_INT_MASK, hw_info->pcie_base + DPMAIF_HPC_INTR_MASK);
return 0;
}
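/* Naming convention inferred from the accesses in this file (not from a
* datasheet): writing a bit to an ...L2TIMCR0 register clears that bit in the
* interrupt mask (unmasks it), writing it to the matching ...L2TIMSR0 register
* sets the mask bit (masks it), and ...L2TIMR0 reads back the current mask.
*/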
static void t7xx_dpmaif_mask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
struct dpmaif_isr_en_mask *isr_en_msk;
u32 value, ul_int_que_done;
int ret;
isr_en_msk = &hw_info->isr_en_mask;
ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
isr_en_msk->ap_ul_l2intr_en_msk &= ~ul_int_que_done;
iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMSR0);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
value, (value & ul_int_que_done) == ul_int_que_done, 0,
DPMAIF_CHECK_TIMEOUT_US);
if (ret)
dev_err(hw_info->dev,
"Could not mask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
value);
}
void t7xx_dpmaif_unmask_ulq_intr(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
struct dpmaif_isr_en_mask *isr_en_msk;
u32 value, ul_int_que_done;
int ret;
isr_en_msk = &hw_info->isr_en_mask;
ul_int_que_done = BIT(q_num + DP_UL_INT_DONE_OFFSET) & DP_UL_INT_QDONE_MSK;
isr_en_msk->ap_ul_l2intr_en_msk |= ul_int_que_done;
iowrite32(ul_int_que_done, hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMCR0);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0,
value, (value & ul_int_que_done) != ul_int_que_done, 0,
DPMAIF_CHECK_TIMEOUT_US);
if (ret)
dev_err(hw_info->dev,
"Could not unmask the UL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
value);
}
void t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
{
hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_BATCNT_LEN_ERR;
iowrite32(DP_DL_INT_BATCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}
void t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info)
{
hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= DP_DL_INT_PITCNT_LEN_ERR;
iowrite32(DP_DL_INT_PITCNT_LEN_ERR, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}
static u32 t7xx_update_dlq_intr(struct dpmaif_hw_info *hw_info, u32 q_done)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
return value;
}
static int t7xx_mask_dlq_intr(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
u32 value, q_done;
int ret;
q_done = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
iowrite32(q_done, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
ret = read_poll_timeout_atomic(t7xx_update_dlq_intr, value, value & q_done,
0, DPMAIF_CHECK_TIMEOUT_US, false, hw_info, q_done);
if (ret) {
dev_err(hw_info->dev,
"Could not mask the DL interrupt. DPMAIF_AO_UL_AP_L2TIMR0 is 0x%x\n",
value);
return -ETIMEDOUT;
}
hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~q_done;
return 0;
}
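/* Note: the poll above goes through t7xx_update_dlq_intr(), so each iteration
* reads the current mask and then re-writes q_done to the mask-set register,
* rather than only reading the mask back.
*/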
void t7xx_dpmaif_dlq_unmask_rx_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
u32 mask;
mask = qno == DPF_RX_QNO0 ? DPMAIF_DL_INT_DLQ0_QDONE : DPMAIF_DL_INT_DLQ1_QDONE;
iowrite32(mask, hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
hw_info->isr_en_mask.ap_dl_l2intr_en_msk |= mask;
}
void t7xx_dpmaif_clr_ip_busy_sts(struct dpmaif_hw_info *hw_info)
{
u32 ip_busy_sts;
ip_busy_sts = ioread32(hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
iowrite32(ip_busy_sts, hw_info->pcie_base + DPMAIF_AP_IP_BUSY);
}
static void t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
unsigned int qno)
{
if (qno == DPF_RX_QNO0)
iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
else
iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
}
void t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(struct dpmaif_hw_info *hw_info,
unsigned int qno)
{
if (qno == DPF_RX_QNO0)
iowrite32(DPMAIF_DL_INT_DLQ0_PITCNT_LEN,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
else
iowrite32(DPMAIF_DL_INT_DLQ1_PITCNT_LEN,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMCR0);
}
void t7xx_dpmaif_ul_clr_all_intr(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_AP_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
}
void t7xx_dpmaif_dl_clr_all_intr(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_AP_APDL_ALL_L2TISAR0_MASK, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
}
static void t7xx_dpmaif_set_intr_para(struct dpmaif_hw_intr_st_para *para,
enum dpmaif_hw_intr_type intr_type, unsigned int intr_queue)
{
para->intr_types[para->intr_cnt] = intr_type;
para->intr_queues[para->intr_cnt] = intr_queue;
para->intr_cnt++;
}
/* The para->intr_cnt counter is set to zero before this function is called.
* It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
*/
static void t7xx_dpmaif_hw_check_tx_intr(struct dpmaif_hw_info *hw_info,
unsigned int intr_status,
struct dpmaif_hw_intr_st_para *para)
{
unsigned long value;
value = FIELD_GET(DP_UL_INT_QDONE_MSK, intr_status);
if (value) {
unsigned int index;
t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DONE, value);
for_each_set_bit(index, &value, DPMAIF_TXQ_NUM)
t7xx_dpmaif_mask_ulq_intr(hw_info, index);
}
value = FIELD_GET(DP_UL_INT_EMPTY_MSK, intr_status);
if (value)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_DRB_EMPTY, value);
value = FIELD_GET(DP_UL_INT_MD_NOTREADY_MSK, intr_status);
if (value)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_NOTREADY, value);
value = FIELD_GET(DP_UL_INT_MD_PWR_NOTREADY_MSK, intr_status);
if (value)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_MD_PWR_NOTREADY, value);
value = FIELD_GET(DP_UL_INT_ERR_MSK, intr_status);
if (value)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_UL_LEN_ERR, value);
/* Clear interrupt status */
iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
}
/* The para->intr_cnt counter is set to zero before this function is called.
* It does not check for overflow as there is no risk of overflowing intr_types or intr_queues.
*/
static void t7xx_dpmaif_hw_check_rx_intr(struct dpmaif_hw_info *hw_info,
unsigned int intr_status,
struct dpmaif_hw_intr_st_para *para, int qno)
{
if (qno == DPF_RX_QNO_DFT) {
if (intr_status & DP_DL_INT_SKB_LEN_ERR)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_SKB_LEN_ERR, DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_BATCNT_LEN_ERR) {
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_BATCNT_LEN_ERR, DPF_RX_QNO_DFT);
hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_BATCNT_LEN_ERR;
iowrite32(DP_DL_INT_BATCNT_LEN_ERR,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
}
if (intr_status & DP_DL_INT_PITCNT_LEN_ERR) {
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PITCNT_LEN_ERR, DPF_RX_QNO_DFT);
hw_info->isr_en_mask.ap_dl_l2intr_en_msk &= ~DP_DL_INT_PITCNT_LEN_ERR;
iowrite32(DP_DL_INT_PITCNT_LEN_ERR,
hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMSR0);
}
if (intr_status & DP_DL_INT_PKT_EMPTY_MSK)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_PKT_EMPTY_SET, DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_FRG_EMPTY_MSK)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRG_EMPTY_SET, DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_MTU_ERR_MSK)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_MTU_ERR, DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_FRG_LEN_ERR_MSK)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_FRGCNT_LEN_ERR, DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_Q0_PITCNT_LEN_ERR) {
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_PITCNT_LEN_ERR, BIT(qno));
t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
}
if (intr_status & DP_DL_INT_HPC_ENT_TYPE_ERR)
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_HPC_ENT_TYPE_ERR,
DPF_RX_QNO_DFT);
if (intr_status & DP_DL_INT_Q0_DONE) {
/* Mask the RX done interrupt immediately after it occurs; do not clear
* the interrupt if the mask operation fails.
*/
if (!t7xx_mask_dlq_intr(hw_info, qno))
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q0_DONE, BIT(qno));
else
intr_status &= ~DP_DL_INT_Q0_DONE;
}
} else {
if (intr_status & DP_DL_INT_Q1_PITCNT_LEN_ERR) {
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_PITCNT_LEN_ERR, BIT(qno));
t7xx_dpmaif_dlq_mask_rx_pitcnt_len_err_intr(hw_info, qno);
}
if (intr_status & DP_DL_INT_Q1_DONE) {
if (!t7xx_mask_dlq_intr(hw_info, qno))
t7xx_dpmaif_set_intr_para(para, DPF_INTR_DL_Q1_DONE, BIT(qno));
else
intr_status &= ~DP_DL_INT_Q1_DONE;
}
}
intr_status |= DP_DL_INT_BATCNT_LEN_ERR;
/* Clear interrupt status */
iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
}
/**
* t7xx_dpmaif_hw_get_intr_cnt() - Read interrupt status from HW and accumulate the interrupt count.
* @hw_info: Pointer to struct hw_info.
* @para: Pointer to struct dpmaif_hw_intr_st_para.
* @qno: Queue number.
*
* Reads RX/TX interrupt status from HW and clears UL/DL status as needed.
*
* Return: Interrupt count.
*/
int t7xx_dpmaif_hw_get_intr_cnt(struct dpmaif_hw_info *hw_info,
struct dpmaif_hw_intr_st_para *para, int qno)
{
u32 rx_intr_status, tx_intr_status = 0;
u32 rx_intr_qdone, tx_intr_qdone = 0;
rx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_APDL_L2TISAR0);
rx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_APDL_L2TIMR0);
/* TX interrupt status */
if (qno == DPF_RX_QNO_DFT) {
/* All ULQ and DLQ0 interrupts use the same source; there is no need to check
* ULQ interrupts when a DLQ1 interrupt has occurred.
*/
tx_intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
tx_intr_qdone = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_AP_L2TIMR0);
}
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
if (qno == DPF_RX_QNO_DFT) {
/* Do not schedule bottom half again or clear UL interrupt status when we
* have already masked it.
*/
tx_intr_status &= ~tx_intr_qdone;
if (tx_intr_status)
t7xx_dpmaif_hw_check_tx_intr(hw_info, tx_intr_status, para);
}
if (rx_intr_status) {
if (qno == DPF_RX_QNO0) {
rx_intr_status &= DP_DL_Q0_STATUS_MASK;
if (rx_intr_qdone & DPMAIF_DL_INT_DLQ0_QDONE)
/* Do not schedule bottom half again or clear DL
* queue done interrupt status when we have already masked it.
*/
rx_intr_status &= ~DP_DL_INT_Q0_DONE;
} else {
rx_intr_status &= DP_DL_Q1_STATUS_MASK;
if (rx_intr_qdone & DPMAIF_DL_INT_DLQ1_QDONE)
rx_intr_status &= ~DP_DL_INT_Q1_DONE;
}
if (rx_intr_status)
t7xx_dpmaif_hw_check_rx_intr(hw_info, rx_intr_status, para, qno);
}
return para->intr_cnt;
}
static int t7xx_dpmaif_sram_init(struct dpmaif_hw_info *hw_info)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
value |= DPMAIF_MEM_CLR;
iowrite32(value, hw_info->pcie_base + DPMAIF_AP_MEM_CLR);
return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_MEM_CLR,
value, !(value & DPMAIF_MEM_CLR), 0,
DPMAIF_CHECK_INIT_TIMEOUT_US);
}
static void t7xx_dpmaif_hw_reset(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_ASSERT);
udelay(2);
iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_ASSERT);
udelay(2);
iowrite32(DPMAIF_AP_AO_RST_BIT, hw_info->pcie_base + DPMAIF_AP_AO_RGU_DEASSERT);
udelay(2);
iowrite32(DPMAIF_AP_RST_BIT, hw_info->pcie_base + DPMAIF_AP_RGU_DEASSERT);
udelay(2);
}
static int t7xx_dpmaif_hw_config(struct dpmaif_hw_info *hw_info)
{
u32 ap_port_mode;
int ret;
t7xx_dpmaif_hw_reset(hw_info);
ret = t7xx_dpmaif_sram_init(hw_info);
if (ret)
return ret;
ap_port_mode = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
ap_port_mode |= DPMAIF_PORT_MODE_PCIE;
iowrite32(ap_port_mode, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
iowrite32(DPMAIF_CG_EN, hw_info->pcie_base + DPMAIF_AP_CG_EN);
return 0;
}
static void t7xx_dpmaif_pcie_dpmaif_sign(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_PCIE_MODE_SET_VALUE, hw_info->pcie_base + DPMAIF_UL_RESERVE_AO_RW);
}
static void t7xx_dpmaif_dl_performance(struct dpmaif_hw_info *hw_info)
{
u32 enable_bat_cache, enable_pit_burst;
enable_bat_cache = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
enable_bat_cache |= DPMAIF_DL_BAT_CACHE_PRI;
iowrite32(enable_bat_cache, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
enable_pit_burst = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
enable_pit_burst |= DPMAIF_DL_BURST_PIT_EN;
iowrite32(enable_pit_burst, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}
/* DPMAIF DL DLQ part HW setting */
static void t7xx_dpmaif_hw_hpc_cntl_set(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = DPMAIF_HPC_DLQ_PATH_MODE | DPMAIF_HPC_ADD_MODE_DF << 2;
value |= DPMAIF_HASH_PRIME_DF << 4;
value |= DPMAIF_HPC_TOTAL_NUM << 8;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_HPC_CNTL);
}
static void t7xx_dpmaif_hw_agg_cfg_set(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = DPMAIF_AGG_MAX_LEN_DF | DPMAIF_AGG_TBL_ENT_NUM_DF << 16;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_DLQ_AGG_CFG);
}
static void t7xx_dpmaif_hw_hash_bit_choose_set(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_DLQ_HASH_BIT_CHOOSE_DF,
hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_INIT_CON5);
}
static void t7xx_dpmaif_hw_mid_pit_timeout_thres_set(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_MID_TIMEOUT_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT0);
}
static void t7xx_dpmaif_hw_dlq_timeout_thres_set(struct dpmaif_hw_info *hw_info)
{
unsigned int value, i;
/* Each register holds two DLQ threshold timeout values */
for (i = 0; i < DPMAIF_HPC_MAX_TOTAL_NUM / 2; i++) {
value = FIELD_PREP(DPMAIF_DLQ_LOW_TIMEOUT_THRES_MKS, DPMAIF_DLQ_TIMEOUT_THRES_DF);
value |= FIELD_PREP(DPMAIF_DLQ_HIGH_TIMEOUT_THRES_MSK,
DPMAIF_DLQ_TIMEOUT_THRES_DF);
iowrite32(value,
hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TIMEOUT1 + sizeof(u32) * i);
}
}
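/* FIELD_PREP() illustration for the loop above (the bit layout is whatever the
* DPMAIF_DLQ_*_TIMEOUT_THRES_* masks define; the masks below are examples
* only): with a low half of GENMASK(15, 0) and a high half of GENMASK(31, 16),
* FIELD_PREP() shifts the same default threshold into each half, so a single
* 32-bit write programs two DLQ timeout values at once.
*/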
static void t7xx_dpmaif_hw_dlq_start_prs_thres_set(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_DLQ_PRS_THRES_DF, hw_info->pcie_base + DPMAIF_AO_DL_DLQPIT_TRIG_THRES);
}
static void t7xx_dpmaif_dl_dlq_hpc_hw_init(struct dpmaif_hw_info *hw_info)
{
t7xx_dpmaif_hw_hpc_cntl_set(hw_info);
t7xx_dpmaif_hw_agg_cfg_set(hw_info);
t7xx_dpmaif_hw_hash_bit_choose_set(hw_info);
t7xx_dpmaif_hw_mid_pit_timeout_thres_set(hw_info);
t7xx_dpmaif_hw_dlq_timeout_thres_set(hw_info);
t7xx_dpmaif_hw_dlq_start_prs_thres_set(hw_info);
}
static int t7xx_dpmaif_dl_bat_init_done(struct dpmaif_hw_info *hw_info, bool frg_en)
{
u32 value, dl_bat_init = 0;
int ret;
if (frg_en)
dl_bat_init = DPMAIF_DL_BAT_FRG_INIT;
dl_bat_init |= DPMAIF_DL_BAT_INIT_ALLSET;
dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (ret) {
dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
return ret;
}
iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (ret)
dev_err(hw_info->dev, "Data plane modem DL BAT initialization failed\n");
return ret;
}
static void t7xx_dpmaif_dl_set_bat_base_addr(struct dpmaif_hw_info *hw_info,
dma_addr_t addr)
{
iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON0);
iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON3);
}
static void t7xx_dpmaif_dl_set_bat_size(struct dpmaif_hw_info *hw_info, unsigned int size)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
value &= ~DPMAIF_BAT_SIZE_MSK;
value |= size & DPMAIF_BAT_SIZE_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
}
static void t7xx_dpmaif_dl_bat_en(struct dpmaif_hw_info *hw_info, bool enable)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
if (enable)
value |= DPMAIF_BAT_EN_MSK;
else
value &= ~DPMAIF_BAT_EN_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
}
static void t7xx_dpmaif_dl_set_ao_bid_maxcnt(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
value &= ~DPMAIF_BAT_BID_MAXCNT_MSK;
value |= FIELD_PREP(DPMAIF_BAT_BID_MAXCNT_MSK, DPMAIF_HW_PKT_BIDCNT);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
}
static void t7xx_dpmaif_dl_set_ao_mtu(struct dpmaif_hw_info *hw_info)
{
iowrite32(DPMAIF_HW_MTU_SIZE, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON1);
}
static void t7xx_dpmaif_dl_set_ao_pit_chknum(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
value &= ~DPMAIF_PIT_CHK_NUM_MSK;
value |= FIELD_PREP(DPMAIF_PIT_CHK_NUM_MSK, DPMAIF_HW_CHK_PIT_NUM);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}
static void t7xx_dpmaif_dl_set_ao_remain_minsz(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
value &= ~DPMAIF_BAT_REMAIN_MINSZ_MSK;
value |= FIELD_PREP(DPMAIF_BAT_REMAIN_MINSZ_MSK,
DPMAIF_HW_BAT_REMAIN / DPMAIF_BAT_REMAIN_SZ_BASE);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON0);
}
static void t7xx_dpmaif_dl_set_ao_bat_bufsz(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
value &= ~DPMAIF_BAT_BUF_SZ_MSK;
value |= FIELD_PREP(DPMAIF_BAT_BUF_SZ_MSK,
DPMAIF_HW_BAT_PKTBUF / DPMAIF_BAT_BUFFER_SZ_BASE);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}
static void t7xx_dpmaif_dl_set_ao_bat_rsv_length(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
value &= ~DPMAIF_BAT_RSV_LEN_MSK;
value |= DPMAIF_HW_BAT_RSVLEN;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PKTINFO_CON2);
}
static void t7xx_dpmaif_dl_set_pkt_alignment(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
value &= ~DPMAIF_PKT_ALIGN_MSK;
value |= DPMAIF_PKT_ALIGN_EN;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}
static void t7xx_dpmaif_dl_set_pkt_checksum(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
value |= DPMAIF_DL_PKT_CHECKSUM_EN;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}
static void t7xx_dpmaif_dl_set_ao_frg_check_thres(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
value &= ~DPMAIF_FRG_CHECK_THRES_MSK;
value |= DPMAIF_HW_CHK_FRG_NUM;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}
static void t7xx_dpmaif_dl_set_ao_frg_bufsz(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
value &= ~DPMAIF_FRG_BUF_SZ_MSK;
value |= FIELD_PREP(DPMAIF_FRG_BUF_SZ_MSK,
DPMAIF_HW_FRG_PKTBUF / DPMAIF_FRG_BUFFER_SZ_BASE);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}
static void t7xx_dpmaif_dl_frg_ao_en(struct dpmaif_hw_info *hw_info, bool enable)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
if (enable)
value |= DPMAIF_FRG_EN_MSK;
else
value &= ~DPMAIF_FRG_EN_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_FRG_THRES);
}
static void t7xx_dpmaif_dl_set_ao_bat_check_thres(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
value &= ~DPMAIF_BAT_CHECK_THRES_MSK;
value |= FIELD_PREP(DPMAIF_BAT_CHECK_THRES_MSK, DPMAIF_HW_CHK_BAT_NUM);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_RDY_CHK_THRES);
}
static void t7xx_dpmaif_dl_set_pit_seqnum(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
value &= ~DPMAIF_DL_PIT_SEQ_MSK;
value |= DPMAIF_DL_PIT_SEQ_VALUE;
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_DL_PIT_SEQ_END);
}
static void t7xx_dpmaif_dl_set_dlq_pit_base_addr(struct dpmaif_hw_info *hw_info,
dma_addr_t addr)
{
iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON0);
iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON4);
}
static void t7xx_dpmaif_dl_set_dlq_pit_size(struct dpmaif_hw_info *hw_info, unsigned int size)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
value &= ~DPMAIF_PIT_SIZE_MSK;
value |= size & DPMAIF_PIT_SIZE_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON1);
iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON2);
iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON5);
iowrite32(0, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON6);
}
static void t7xx_dpmaif_dl_dlq_pit_en(struct dpmaif_hw_info *hw_info)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
value |= DPMAIF_DLQPIT_EN_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT_CON3);
}
static void t7xx_dpmaif_dl_dlq_pit_init_done(struct dpmaif_hw_info *hw_info,
unsigned int pit_idx)
{
unsigned int dl_pit_init;
int timeout;
u32 value;
dl_pit_init = DPMAIF_DL_PIT_INIT_ALLSET;
dl_pit_init |= (pit_idx << DPMAIF_DLQPIT_CHAN_OFS);
dl_pit_init |= DPMAIF_DL_PIT_INIT_EN;
timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
DPMAIF_CHECK_DELAY_US,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout) {
dev_err(hw_info->dev, "Data plane modem DL PIT is not ready\n");
return;
}
iowrite32(dl_pit_init, hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT);
timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_INIT,
value, !(value & DPMAIF_DL_PIT_INIT_NOT_READY),
DPMAIF_CHECK_DELAY_US,
DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout)
dev_err(hw_info->dev, "Data plane modem DL PIT initialization failed\n");
}
static void t7xx_dpmaif_config_dlq_pit_hw(struct dpmaif_hw_info *hw_info, unsigned int q_num,
struct dpmaif_dl *dl_que)
{
t7xx_dpmaif_dl_set_dlq_pit_base_addr(hw_info, dl_que->pit_base);
t7xx_dpmaif_dl_set_dlq_pit_size(hw_info, dl_que->pit_size_cnt);
t7xx_dpmaif_dl_dlq_pit_en(hw_info);
t7xx_dpmaif_dl_dlq_pit_init_done(hw_info, q_num);
}
static void t7xx_dpmaif_config_all_dlq_hw(struct dpmaif_hw_info *hw_info)
{
int i;
for (i = 0; i < DPMAIF_RXQ_NUM; i++)
t7xx_dpmaif_config_dlq_pit_hw(hw_info, i, &hw_info->dl_que[i]);
}
static void t7xx_dpmaif_dl_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
{
u32 dl_bat_init, value;
int timeout;
value = ioread32(hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
if (enable)
value |= DPMAIF_BAT_EN_MSK;
else
value &= ~DPMAIF_BAT_EN_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_INIT_CON1);
dl_bat_init = DPMAIF_DL_BAT_INIT_ONLY_ENABLE_BIT;
dl_bat_init |= DPMAIF_DL_BAT_INIT_EN;
timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (timeout)
dev_err(hw_info->dev, "Timeout updating BAT setting to HW\n");
iowrite32(dl_bat_init, hw_info->pcie_base + DPMAIF_DL_BAT_INIT);
timeout = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_INIT,
value, !(value & DPMAIF_DL_BAT_INIT_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (timeout)
dev_err(hw_info->dev, "Data plane modem DL BAT is not ready\n");
}
static int t7xx_dpmaif_config_dlq_hw(struct dpmaif_hw_info *hw_info)
{
struct dpmaif_dl *dl_que;
int ret;
t7xx_dpmaif_dl_dlq_hpc_hw_init(hw_info);
dl_que = &hw_info->dl_que[0]; /* All queues share one BAT/frag BAT table */
if (!dl_que->que_started)
return -EBUSY;
t7xx_dpmaif_dl_set_ao_remain_minsz(hw_info);
t7xx_dpmaif_dl_set_ao_bat_bufsz(hw_info);
t7xx_dpmaif_dl_set_ao_frg_bufsz(hw_info);
t7xx_dpmaif_dl_set_ao_bat_rsv_length(hw_info);
t7xx_dpmaif_dl_set_ao_bid_maxcnt(hw_info);
t7xx_dpmaif_dl_set_pkt_alignment(hw_info);
t7xx_dpmaif_dl_set_pit_seqnum(hw_info);
t7xx_dpmaif_dl_set_ao_mtu(hw_info);
t7xx_dpmaif_dl_set_ao_pit_chknum(hw_info);
t7xx_dpmaif_dl_set_ao_bat_check_thres(hw_info);
t7xx_dpmaif_dl_set_ao_frg_check_thres(hw_info);
t7xx_dpmaif_dl_frg_ao_en(hw_info, true);
t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->frg_base);
t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->frg_size_cnt);
t7xx_dpmaif_dl_bat_en(hw_info, true);
ret = t7xx_dpmaif_dl_bat_init_done(hw_info, true);
if (ret)
return ret;
t7xx_dpmaif_dl_set_bat_base_addr(hw_info, dl_que->bat_base);
t7xx_dpmaif_dl_set_bat_size(hw_info, dl_que->bat_size_cnt);
t7xx_dpmaif_dl_bat_en(hw_info, false);
ret = t7xx_dpmaif_dl_bat_init_done(hw_info, false);
if (ret)
return ret;
/* Init PIT (two PIT tables) */
t7xx_dpmaif_config_all_dlq_hw(hw_info);
t7xx_dpmaif_dl_all_q_en(hw_info, true);
t7xx_dpmaif_dl_set_pkt_checksum(hw_info);
return 0;
}
static void t7xx_dpmaif_ul_update_drb_size(struct dpmaif_hw_info *hw_info,
unsigned int q_num, unsigned int size)
{
unsigned int value;
value = ioread32(hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
value &= ~DPMAIF_DRB_SIZE_MSK;
value |= size & DPMAIF_DRB_SIZE_MSK;
iowrite32(value, hw_info->pcie_base + DPMAIF_UL_DRBSIZE_ADDRH_n(q_num));
}
static void t7xx_dpmaif_ul_update_drb_base_addr(struct dpmaif_hw_info *hw_info,
unsigned int q_num, dma_addr_t addr)
{
iowrite32(lower_32_bits(addr), hw_info->pcie_base + DPMAIF_ULQSAR_n(q_num));
iowrite32(upper_32_bits(addr), hw_info->pcie_base + DPMAIF_UL_DRB_ADDRH_n(q_num));
}
static void t7xx_dpmaif_ul_rdy_en(struct dpmaif_hw_info *hw_info,
unsigned int q_num, bool ready)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
if (ready)
value |= BIT(q_num);
else
value &= ~BIT(q_num);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}
static void t7xx_dpmaif_ul_arb_en(struct dpmaif_hw_info *hw_info,
unsigned int q_num, bool enable)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
if (enable)
value |= BIT(q_num + 8);
else
value &= ~BIT(q_num + 8);
iowrite32(value, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}
static void t7xx_dpmaif_config_ulq_hw(struct dpmaif_hw_info *hw_info)
{
struct dpmaif_ul *ul_que;
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
ul_que = &hw_info->ul_que[i];
if (ul_que->que_started) {
t7xx_dpmaif_ul_update_drb_size(hw_info, i, ul_que->drb_size_cnt *
DPMAIF_UL_DRB_SIZE_WORD);
t7xx_dpmaif_ul_update_drb_base_addr(hw_info, i, ul_que->drb_base);
t7xx_dpmaif_ul_rdy_en(hw_info, i, true);
t7xx_dpmaif_ul_arb_en(hw_info, i, true);
} else {
t7xx_dpmaif_ul_arb_en(hw_info, i, false);
}
}
}
static int t7xx_dpmaif_hw_init_done(struct dpmaif_hw_info *hw_info)
{
u32 ap_cfg;
int ret;
ap_cfg = ioread32(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
ap_cfg |= DPMAIF_SRAM_SYNC;
iowrite32(ap_cfg, hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_AP_OVERWRITE_CFG,
ap_cfg, !(ap_cfg & DPMAIF_SRAM_SYNC), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (ret)
return ret;
iowrite32(DPMAIF_UL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_UL_INIT_SET);
iowrite32(DPMAIF_DL_INIT_DONE, hw_info->pcie_base + DPMAIF_AO_DL_INIT_SET);
return 0;
}
static bool t7xx_dpmaif_dl_idle_check(struct dpmaif_hw_info *hw_info)
{
u32 dpmaif_dl_is_busy = ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY);
return !(dpmaif_dl_is_busy & DPMAIF_DL_IDLE_STS);
}
static void t7xx_dpmaif_ul_all_q_en(struct dpmaif_hw_info *hw_info, bool enable)
{
u32 ul_arb_en = ioread32(hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
if (enable)
ul_arb_en |= DPMAIF_UL_ALL_QUE_ARB_EN;
else
ul_arb_en &= ~DPMAIF_UL_ALL_QUE_ARB_EN;
iowrite32(ul_arb_en, hw_info->pcie_base + DPMAIF_AO_UL_CHNL_ARB0);
}
static bool t7xx_dpmaif_ul_idle_check(struct dpmaif_hw_info *hw_info)
{
u32 dpmaif_ul_is_busy = ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY);
return !(dpmaif_ul_is_busy & DPMAIF_UL_IDLE_STS);
}
void t7xx_dpmaif_ul_update_hw_drb_cnt(struct dpmaif_hw_info *hw_info, unsigned int q_num,
unsigned int drb_entry_cnt)
{
u32 ul_update, value;
int err;
ul_update = drb_entry_cnt & DPMAIF_UL_ADD_COUNT_MASK;
ul_update |= DPMAIF_UL_ADD_UPDATE;
err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (err) {
dev_err(hw_info->dev, "UL add is not ready\n");
return;
}
iowrite32(ul_update, hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num));
err = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_ULQ_ADD_DESC_CH_n(q_num),
value, !(value & DPMAIF_UL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (err)
dev_err(hw_info->dev, "Timeout updating UL add\n");
}
unsigned int t7xx_dpmaif_ul_get_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
unsigned int value = ioread32(hw_info->pcie_base + DPMAIF_ULQ_STA0_n(q_num));
return FIELD_GET(DPMAIF_UL_DRB_RIDX_MSK, value) / DPMAIF_UL_DRB_SIZE_WORD;
}
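/* The HW read index above appears to be reported in word units: dividing by
* DPMAIF_UL_DRB_SIZE_WORD converts it back to a DRB entry index, mirroring the
* multiplication done in t7xx_dpmaif_config_ulq_hw() when the DRB size is
* programmed.
*/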
int t7xx_dpmaif_dlq_add_pit_remain_cnt(struct dpmaif_hw_info *hw_info, unsigned int dlq_pit_idx,
unsigned int pit_remain_cnt)
{
u32 dl_update, value;
int ret;
dl_update = pit_remain_cnt & DPMAIF_PIT_REM_CNT_MSK;
dl_update |= DPMAIF_DL_ADD_UPDATE | (dlq_pit_idx << DPMAIF_ADD_DLQ_PIT_CHAN_OFS);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (ret) {
dev_err(hw_info->dev, "Data plane modem is not ready to add dlq\n");
return ret;
}
iowrite32(dl_update, hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD);
ret = ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_DLQPIT_ADD,
value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
if (ret) {
dev_err(hw_info->dev, "Data plane modem add dlq failed\n");
return ret;
}
return 0;
}
unsigned int t7xx_dpmaif_dl_dlq_pit_get_wr_idx(struct dpmaif_hw_info *hw_info,
unsigned int dlq_pit_idx)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_DLQ_WR_IDX +
dlq_pit_idx * DLQ_PIT_IDX_SIZE);
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
static int t7xx_dl_add_timedout(struct dpmaif_hw_info *hw_info)
{
u32 value;
return ioread32_poll_timeout_atomic(hw_info->pcie_base + DPMAIF_DL_BAT_ADD,
value, !(value & DPMAIF_DL_ADD_NOT_READY), 0,
DPMAIF_CHECK_TIMEOUT_US);
}
int t7xx_dpmaif_dl_snd_hw_bat_cnt(struct dpmaif_hw_info *hw_info, unsigned int bat_entry_cnt)
{
unsigned int value;
if (t7xx_dl_add_timedout(hw_info)) {
dev_err(hw_info->dev, "DL add BAT not ready\n");
return -EBUSY;
}
value = bat_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
value |= DPMAIF_DL_ADD_UPDATE;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
if (t7xx_dl_add_timedout(hw_info)) {
dev_err(hw_info->dev, "DL add BAT timeout\n");
return -EBUSY;
}
return 0;
}
unsigned int t7xx_dpmaif_dl_get_bat_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_RD_IDX);
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
unsigned int t7xx_dpmaif_dl_get_bat_wr_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_BAT_WR_IDX);
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
int t7xx_dpmaif_dl_snd_hw_frg_cnt(struct dpmaif_hw_info *hw_info, unsigned int frg_entry_cnt)
{
unsigned int value;
if (t7xx_dl_add_timedout(hw_info)) {
dev_err(hw_info->dev, "Data plane modem is not ready to add frag DLQ\n");
return -EBUSY;
}
value = frg_entry_cnt & DPMAIF_DL_ADD_COUNT_MASK;
value |= DPMAIF_DL_FRG_ADD_UPDATE | DPMAIF_DL_ADD_UPDATE;
iowrite32(value, hw_info->pcie_base + DPMAIF_DL_BAT_ADD);
if (t7xx_dl_add_timedout(hw_info)) {
dev_err(hw_info->dev, "Data plane modem add frag DLQ failed");
return -EBUSY;
}
return 0;
}
unsigned int t7xx_dpmaif_dl_get_frg_rd_idx(struct dpmaif_hw_info *hw_info, unsigned int q_num)
{
u32 value;
value = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_FRGBAT_RD_IDX);
return value & DPMAIF_DL_RD_WR_IDX_MSK;
}
static void t7xx_dpmaif_set_queue_property(struct dpmaif_hw_info *hw_info,
struct dpmaif_hw_params *init_para)
{
struct dpmaif_dl *dl_que;
struct dpmaif_ul *ul_que;
int i;
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
dl_que = &hw_info->dl_que[i];
dl_que->bat_base = init_para->pkt_bat_base_addr[i];
dl_que->bat_size_cnt = init_para->pkt_bat_size_cnt[i];
dl_que->pit_base = init_para->pit_base_addr[i];
dl_que->pit_size_cnt = init_para->pit_size_cnt[i];
dl_que->frg_base = init_para->frg_bat_base_addr[i];
dl_que->frg_size_cnt = init_para->frg_bat_size_cnt[i];
dl_que->que_started = true;
}
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
ul_que = &hw_info->ul_que[i];
ul_que->drb_base = init_para->drb_base_addr[i];
ul_que->drb_size_cnt = init_para->drb_size_cnt[i];
ul_que->que_started = true;
}
}
/**
* t7xx_dpmaif_hw_stop_all_txq() - Stop all TX queues.
* @hw_info: Pointer to struct hw_info.
*
* Disable the HW UL queues and wait for busy UL queues to go idle,
* with an attempt count of 1000000.
*
* Return:
* * 0 - Success
* * -ETIMEDOUT - Timed out checking busy queues
*/
int t7xx_dpmaif_hw_stop_all_txq(struct dpmaif_hw_info *hw_info)
{
int count = 0;
t7xx_dpmaif_ul_all_q_en(hw_info, false);
while (t7xx_dpmaif_ul_idle_check(hw_info)) {
if (++count >= DPMAIF_MAX_CHECK_COUNT) {
dev_err(hw_info->dev, "Failed to stop TX, status: 0x%x\n",
ioread32(hw_info->pcie_base + DPMAIF_UL_CHK_BUSY));
return -ETIMEDOUT;
}
}
return 0;
}
/**
* t7xx_dpmaif_hw_stop_all_rxq() - Stop all RX queues.
* @hw_info: Pointer to struct hw_info.
*
* Disable the HW DL queues and wait for busy DL queues to go idle,
* with an attempt count of 1000000.
* Then check that the HW PIT write index equals the read index, with
* the same attempt count.
*
* Return:
* * 0 - Success.
* * -ETIMEDOUT - Timed out checking busy queues.
*/
int t7xx_dpmaif_hw_stop_all_rxq(struct dpmaif_hw_info *hw_info)
{
unsigned int wr_idx, rd_idx;
int count = 0;
t7xx_dpmaif_dl_all_q_en(hw_info, false);
while (t7xx_dpmaif_dl_idle_check(hw_info)) {
if (++count >= DPMAIF_MAX_CHECK_COUNT) {
dev_err(hw_info->dev, "Failed to stop RX, status: 0x%x\n",
ioread32(hw_info->pcie_base + DPMAIF_DL_CHK_BUSY));
return -ETIMEDOUT;
}
}
/* Check middle PIT sync done */
count = 0;
do {
wr_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_WR_IDX);
wr_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
rd_idx = ioread32(hw_info->pcie_base + DPMAIF_AO_DL_PIT_RD_IDX);
rd_idx &= DPMAIF_DL_RD_WR_IDX_MSK;
if (wr_idx == rd_idx)
return 0;
} while (++count < DPMAIF_MAX_CHECK_COUNT);
dev_err(hw_info->dev, "Check middle PIT sync fail\n");
return -ETIMEDOUT;
}
void t7xx_dpmaif_start_hw(struct dpmaif_hw_info *hw_info)
{
t7xx_dpmaif_ul_all_q_en(hw_info, true);
t7xx_dpmaif_dl_all_q_en(hw_info, true);
}
/**
* t7xx_dpmaif_hw_init() - Initialize HW data path API.
* @hw_info: Pointer to struct hw_info.
* @init_param: Pointer to struct dpmaif_hw_params.
*
* Configures the port mode and clock, initializes the HW interrupts, and sets up the HW queues.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code from a failed sub-initialization.
*/
int t7xx_dpmaif_hw_init(struct dpmaif_hw_info *hw_info, struct dpmaif_hw_params *init_param)
{
int ret;
ret = t7xx_dpmaif_hw_config(hw_info);
if (ret) {
dev_err(hw_info->dev, "DPMAIF HW config failed\n");
return ret;
}
ret = t7xx_dpmaif_init_intr(hw_info);
if (ret) {
dev_err(hw_info->dev, "DPMAIF HW interrupts init failed\n");
return ret;
}
t7xx_dpmaif_set_queue_property(hw_info, init_param);
t7xx_dpmaif_pcie_dpmaif_sign(hw_info);
t7xx_dpmaif_dl_performance(hw_info);
ret = t7xx_dpmaif_config_dlq_hw(hw_info);
if (ret) {
dev_err(hw_info->dev, "DPMAIF HW dlq config failed\n");
return ret;
}
t7xx_dpmaif_config_ulq_hw(hw_info);
ret = t7xx_dpmaif_hw_init_done(hw_info);
if (ret)
dev_err(hw_info->dev, "DPMAIF HW queue init failed\n");
return ret;
}
bool t7xx_dpmaif_ul_clr_done(struct dpmaif_hw_info *hw_info, unsigned int qno)
{
u32 intr_status;
intr_status = ioread32(hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
intr_status &= BIT(DP_UL_INT_DONE_OFFSET + qno);
if (intr_status) {
iowrite32(intr_status, hw_info->pcie_base + DPMAIF_AP_L2TISAR0);
return true;
}
return false;
}
| linux-master | drivers/net/wwan/t7xx/t7xx_dpmaif.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Sreehari Kancharla <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Ricardo Martinez <[email protected]>
*/
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/dev_printk.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_reg.h"
#define D2H_INT_SR_ACK (D2H_INT_SUSPEND_ACK | \
D2H_INT_RESUME_ACK | \
D2H_INT_SUSPEND_ACK_AP | \
D2H_INT_RESUME_ACK_AP)
static void t7xx_mhccif_clear_interrupts(struct t7xx_pci_dev *t7xx_dev, u32 mask)
{
void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
/* Clear level 2 interrupt */
iowrite32(mask, mhccif_pbase + REG_EP2RC_SW_INT_ACK);
/* Ensure write is complete */
t7xx_mhccif_read_sw_int_sts(t7xx_dev);
/* Clear level 1 interrupt */
t7xx_pcie_mac_clear_int_status(t7xx_dev, MHCCIF_INT);
}
static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
u32 int_status, val;
val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
iowrite32(val, IREG_BASE(t7xx_dev) + DISABLE_ASPM_LOWPWR);
int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
if (int_status & D2H_SW_INT_MASK) {
int ret = t7xx_pci_mhccif_isr(t7xx_dev);
if (ret)
dev_err(&t7xx_dev->pdev->dev, "PCI MHCCIF ISR failure: %d", ret);
}
t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
if (int_status & D2H_INT_DS_LOCK_ACK)
complete_all(&t7xx_dev->sleep_lock_acquire);
if (int_status & D2H_INT_SR_ACK)
complete(&t7xx_dev->pm_sr_ack);
iowrite32(T7XX_L1_BIT(1), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
int_status = t7xx_mhccif_read_sw_int_sts(t7xx_dev);
if (!int_status) {
val = T7XX_L1_1_BIT(1) | T7XX_L1_2_BIT(1);
iowrite32(val, IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
}
t7xx_pcie_mac_set_int(t7xx_dev, MHCCIF_INT);
return IRQ_HANDLED;
}
u32 t7xx_mhccif_read_sw_int_sts(struct t7xx_pci_dev *t7xx_dev)
{
return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_STS);
}
void t7xx_mhccif_mask_set(struct t7xx_pci_dev *t7xx_dev, u32 val)
{
iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_SET);
}
void t7xx_mhccif_mask_clr(struct t7xx_pci_dev *t7xx_dev, u32 val)
{
iowrite32(val, t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK_CLR);
}
u32 t7xx_mhccif_mask_get(struct t7xx_pci_dev *t7xx_dev)
{
return ioread32(t7xx_dev->base_addr.mhccif_rc_base + REG_EP2RC_SW_INT_EAP_MASK);
}
static irqreturn_t t7xx_mhccif_isr_handler(int irq, void *data)
{
return IRQ_WAKE_THREAD;
}
void t7xx_mhccif_init(struct t7xx_pci_dev *t7xx_dev)
{
t7xx_dev->base_addr.mhccif_rc_base = t7xx_dev->base_addr.pcie_ext_reg_base +
MHCCIF_RC_DEV_BASE -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
t7xx_dev->intr_handler[MHCCIF_INT] = t7xx_mhccif_isr_handler;
t7xx_dev->intr_thread[MHCCIF_INT] = t7xx_mhccif_isr_thread;
t7xx_dev->callback_param[MHCCIF_INT] = t7xx_dev;
}
void t7xx_mhccif_h2d_swint_trigger(struct t7xx_pci_dev *t7xx_dev, u32 channel)
{
void __iomem *mhccif_pbase = t7xx_dev->base_addr.mhccif_rc_base;
iowrite32(BIT(channel), mhccif_pbase + REG_RC2EP_SW_BSY);
iowrite32(channel, mhccif_pbase + REG_RC2EP_SW_TCHNUM);
}
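/* Editorial sketch, not part of the driver: using the MHCCIF helpers above.
 * The mask bits and the channel number are made-up example values; real
 * callers use the protocol-defined D2H masks and H2D channels (kept under
 * #if 0 so it is never built).
 */
#if 0
static void example_mhccif_usage(struct t7xx_pci_dev *t7xx_dev)
{
	/* Unmask two device-to-host interrupt sources (example bits only) */
	t7xx_mhccif_mask_clr(t7xx_dev, BIT(0) | BIT(1));

	/* Ring the host-to-device doorbell: the helper marks the channel busy
	 * and then writes its number to raise the RC-to-EP software interrupt.
	 */
	t7xx_mhccif_h2d_swint_trigger(t7xx_dev, 2 /* example channel */);
}
#endif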
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_mhccif.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
* Sreehari Kancharla <[email protected]>
*
* Contributors:
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
*/
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#define MAX_TX_BUDGET 16
#define MAX_RX_BUDGET 16
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000
#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
queue->dir = tx_rx;
queue->index = index;
queue->md_ctrl = md_ctrl;
queue->tr_ring = NULL;
queue->tr_done = NULL;
queue->tx_next = NULL;
}
static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
init_waitqueue_head(&queue->req_wq);
spin_lock_init(&queue->ring_lock);
}
static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{
gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
}
static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{
gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
}
static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
size_t size, gfp_t gfp_mask)
{
req->skb = __dev_alloc_skb(size, gfp_mask);
if (!req->skb)
return -ENOMEM;
req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
dev_kfree_skb_any(req->skb);
req->skb = NULL;
req->mapped_buff = 0;
dev_err(md_ctrl->dev, "DMA mapping failed\n");
return -ENOMEM;
}
return 0;
}
static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
unsigned int hwo_polling_count = 0;
struct t7xx_cldma_hw *hw_info;
bool rx_not_done = true;
unsigned long flags;
int count = 0;
hw_info = &md_ctrl->hw_info;
do {
struct cldma_request *req;
struct cldma_gpd *gpd;
struct sk_buff *skb;
int ret;
req = queue->tr_done;
if (!req)
return -ENODATA;
gpd = req->gpd;
if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
dma_addr_t gpd_addr;
if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
return -ENODEV;
}
gpd_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_DL_CURRENT_ADDRL_0 +
queue->index * sizeof(u64));
if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
return 0;
udelay(1);
continue;
}
hwo_polling_count = 0;
skb = req->skb;
if (req->mapped_buff) {
dma_unmap_single(md_ctrl->dev, req->mapped_buff,
queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
req->mapped_buff = 0;
}
skb->len = 0;
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));
ret = md_ctrl->recv_skb(queue, skb);
		/* Stop processing here; the remaining entries will be retried later */
if (ret < 0)
return ret;
req->skb = NULL;
t7xx_cldma_gpd_set_data_ptr(gpd, 0);
spin_lock_irqsave(&queue->ring_lock, flags);
queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
req = queue->rx_refill;
ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
if (ret)
return ret;
gpd = req->gpd;
t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
gpd->data_buff_len = 0;
gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
spin_lock_irqsave(&queue->ring_lock, flags);
queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
rx_not_done = ++count < budget || !need_resched();
} while (rx_not_done);
*over_budget = true;
return 0;
}
static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
struct t7xx_cldma_hw *hw_info;
unsigned int pending_rx_int;
bool over_budget = false;
unsigned long flags;
int ret;
hw_info = &md_ctrl->hw_info;
do {
ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
if (ret == -ENODATA)
return 0;
else if (ret)
return ret;
pending_rx_int = 0;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (md_ctrl->rxq_active & BIT(queue->index)) {
if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);
pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
MTK_RX);
if (pending_rx_int) {
t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);
if (over_budget) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
return -EAGAIN;
}
}
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
} while (pending_rx_int);
return 0;
}
static void t7xx_cldma_rx_done(struct work_struct *work)
{
struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
int value;
value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
if (value && md_ctrl->rxq_active & BIT(queue->index)) {
queue_work(queue->worker, &queue->cldma_work);
return;
}
t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
pm_runtime_mark_last_busy(md_ctrl->dev);
pm_runtime_put_autosuspend(md_ctrl->dev);
}
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
unsigned int dma_len, count = 0;
struct cldma_request *req;
struct cldma_gpd *gpd;
unsigned long flags;
dma_addr_t dma_free;
struct sk_buff *skb;
while (!kthread_should_stop()) {
spin_lock_irqsave(&queue->ring_lock, flags);
req = queue->tr_done;
if (!req) {
spin_unlock_irqrestore(&queue->ring_lock, flags);
break;
}
gpd = req->gpd;
if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
spin_unlock_irqrestore(&queue->ring_lock, flags);
break;
}
queue->budget++;
dma_free = req->mapped_buff;
dma_len = le16_to_cpu(gpd->data_buff_len);
skb = req->skb;
req->skb = NULL;
queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
count++;
dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
if (count)
wake_up_nr(&queue->req_wq, count);
return count;
}
static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
struct cldma_request *req;
dma_addr_t ul_curr_addr;
unsigned long flags;
bool pending_gpd;
if (!(md_ctrl->txq_active & BIT(queue->index)))
return;
spin_lock_irqsave(&queue->ring_lock, flags);
req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (pending_gpd) {
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		/* Check the TGPD currently being processed; its 64-bit address is read
		 * from a per-queue register table indexed by the queue number.
		 */
ul_curr_addr = ioread64(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
queue->index * sizeof(u64));
if (req->gpd_addr != ul_curr_addr) {
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
md_ctrl->hif_id, queue->index);
return;
}
t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static void t7xx_cldma_tx_done(struct work_struct *work)
{
struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
struct t7xx_cldma_hw *hw_info;
unsigned int l2_tx_int;
unsigned long flags;
hw_info = &md_ctrl->hw_info;
t7xx_cldma_gpd_tx_collect(queue);
l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
MTK_TX);
if (l2_tx_int & EQ_STA_BIT(queue->index)) {
t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
t7xx_cldma_txq_empty_hndl(queue);
}
if (l2_tx_int & BIT(queue->index)) {
t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
queue_work(queue->worker, &queue->cldma_work);
return;
}
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (md_ctrl->txq_active & BIT(queue->index)) {
t7xx_cldma_clear_ip_busy(hw_info);
t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
pm_runtime_mark_last_busy(md_ctrl->dev);
pm_runtime_put_autosuspend(md_ctrl->dev);
}
static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
struct cldma_request *req_cur, *req_next;
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
if (req_cur->mapped_buff && req_cur->skb) {
dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
ring->pkt_size, tx_rx);
req_cur->mapped_buff = 0;
}
dev_kfree_skb_any(req_cur->skb);
if (req_cur->gpd)
dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);
list_del(&req_cur->entry);
kfree(req_cur);
}
}
static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{
struct cldma_request *req;
int val;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
if (!req->gpd)
goto err_free_req;
val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
if (val)
goto err_free_pool;
return req;
err_free_pool:
dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);
err_free_req:
kfree(req);
return NULL;
}
static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
struct cldma_request *req;
struct cldma_gpd *gpd;
int i;
INIT_LIST_HEAD(&ring->gpd_ring);
ring->length = MAX_RX_BUDGET;
for (i = 0; i < ring->length; i++) {
req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
if (!req) {
t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
return -ENOMEM;
}
gpd = req->gpd;
t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
INIT_LIST_HEAD(&req->entry);
list_add_tail(&req->entry, &ring->gpd_ring);
}
	/* Link the GPDs into a circular chain: the first iteration points the last
	 * GPD back at the head, the remaining ones link each GPD to its successor.
	 */
list_for_each_entry(req, &ring->gpd_ring, entry) {
t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
gpd = req->gpd;
}
return 0;
}
static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{
struct cldma_request *req;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
if (!req->gpd) {
kfree(req);
return NULL;
}
return req;
}
static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
struct cldma_request *req;
struct cldma_gpd *gpd;
int i;
INIT_LIST_HEAD(&ring->gpd_ring);
ring->length = MAX_TX_BUDGET;
for (i = 0; i < ring->length; i++) {
req = t7xx_alloc_tx_request(md_ctrl);
if (!req) {
t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
return -ENOMEM;
}
gpd = req->gpd;
gpd->flags = GPD_FLAGS_IOC;
INIT_LIST_HEAD(&req->entry);
list_add_tail(&req->entry, &ring->gpd_ring);
}
	/* Link the GPDs into a circular chain: the first iteration points the last
	 * GPD back at the head, the remaining ones link each GPD to its successor.
	 */
list_for_each_entry(req, &ring->gpd_ring, entry) {
t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
gpd = req->gpd;
}
return 0;
}
/**
* t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
* @queue: Pointer to the queue structure.
*
 * Called with ring_lock held (unless called during the initialization phase).
*/
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{
struct cldma_request *req;
req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
queue->tr_done = req;
queue->budget = queue->tr_ring->length;
if (queue->dir == MTK_TX)
queue->tx_next = req;
else
queue->rx_refill = req;
}
static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
queue->dir = MTK_RX;
queue->tr_ring = &md_ctrl->rx_ring[queue->index];
t7xx_cldma_q_reset(queue);
}
static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
queue->dir = MTK_TX;
queue->tr_ring = &md_ctrl->tx_ring[queue->index];
t7xx_cldma_q_reset(queue);
}
static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
int i;
/* L2 raw interrupt status */
l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
l2_tx_int &= ~l2_tx_int_msk;
l2_rx_int &= ~l2_rx_int_msk;
if (l2_tx_int) {
if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
/* Read and clear L3 TX interrupt status */
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
}
t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
if (i < CLDMA_TXQ_NUM) {
pm_runtime_get(md_ctrl->dev);
t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
queue_work(md_ctrl->txq[i].worker,
&md_ctrl->txq[i].cldma_work);
} else {
t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
}
}
}
}
if (l2_rx_int) {
if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
/* Read and clear L3 RX interrupt status */
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
}
t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
pm_runtime_get(md_ctrl->dev);
t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
}
}
}
}
static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
unsigned int tx_active;
unsigned int rx_active;
if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
return false;
tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);
return tx_active || rx_active;
}
/**
* t7xx_cldma_stop() - Stop CLDMA.
* @md_ctrl: CLDMA context structure.
*
* Stop TX and RX queues. Disable L1 and L2 interrupts.
* Clear status registers.
*
* Return:
* * 0 - Success.
 * * -ERROR - Error code from polling t7xx_cldma_qs_are_active().
*/
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
bool active;
int i, ret;
md_ctrl->rxq_active = 0;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
md_ctrl->txq_active = 0;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
md_ctrl->txq_started = 0;
t7xx_cldma_disable_irq(md_ctrl);
t7xx_cldma_hw_stop(hw_info, MTK_RX);
t7xx_cldma_hw_stop(hw_info, MTK_TX);
t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);
if (md_ctrl->is_late_init) {
for (i = 0; i < CLDMA_TXQ_NUM; i++)
flush_work(&md_ctrl->txq[i].cldma_work);
for (i = 0; i < CLDMA_RXQ_NUM; i++)
flush_work(&md_ctrl->rxq[i].cldma_work);
}
ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
if (ret)
dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);
return ret;
}
static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
int i;
if (!md_ctrl->is_late_init)
return;
for (i = 0; i < CLDMA_TXQ_NUM; i++)
t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
for (i = 0; i < CLDMA_RXQ_NUM; i++)
t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);
dma_pool_destroy(md_ctrl->gpd_dmapool);
md_ctrl->gpd_dmapool = NULL;
md_ctrl->is_late_init = false;
}
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
unsigned long flags;
int i;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
md_ctrl->txq_active = 0;
md_ctrl->rxq_active = 0;
t7xx_cldma_disable_irq(md_ctrl);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
cancel_work_sync(&md_ctrl->txq[i].cldma_work);
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
for (i = 0; i < CLDMA_RXQ_NUM; i++) {
cancel_work_sync(&md_ctrl->rxq[i].cldma_work);
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
t7xx_cldma_late_release(md_ctrl);
}
/**
* t7xx_cldma_start() - Start CLDMA.
* @md_ctrl: CLDMA context structure.
*
* Set TX/RX start address.
* Start all RX queues and enable L2 interrupt.
*/
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
unsigned long flags;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (md_ctrl->is_late_init) {
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
int i;
t7xx_cldma_enable_irq(md_ctrl);
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
if (md_ctrl->txq[i].tr_done)
t7xx_cldma_hw_set_start_addr(hw_info, i,
md_ctrl->txq[i].tr_done->gpd_addr,
MTK_TX);
}
for (i = 0; i < CLDMA_RXQ_NUM; i++) {
if (md_ctrl->rxq[i].tr_done)
t7xx_cldma_hw_set_start_addr(hw_info, i,
md_ctrl->rxq[i].tr_done->gpd_addr,
MTK_RX);
}
/* Enable L2 interrupt */
t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
t7xx_cldma_hw_start(hw_info);
md_ctrl->txq_started = 0;
md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
struct cldma_queue *txq = &md_ctrl->txq[qnum];
struct cldma_request *req;
struct cldma_gpd *gpd;
unsigned long flags;
spin_lock_irqsave(&txq->ring_lock, flags);
t7xx_cldma_q_reset(txq);
list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
gpd = req->gpd;
gpd->flags &= ~GPD_FLAGS_HWO;
t7xx_cldma_gpd_set_data_ptr(gpd, 0);
gpd->data_buff_len = 0;
dev_kfree_skb_any(req->skb);
req->skb = NULL;
}
spin_unlock_irqrestore(&txq->ring_lock, flags);
}
static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
struct cldma_request *req;
struct cldma_gpd *gpd;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&rxq->ring_lock, flags);
t7xx_cldma_q_reset(rxq);
list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
gpd = req->gpd;
gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
gpd->data_buff_len = 0;
if (req->skb) {
req->skb->len = 0;
skb_reset_tail_pointer(req->skb);
}
}
list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
if (req->skb)
continue;
ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
if (ret)
break;
t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
}
spin_unlock_irqrestore(&rxq->ring_lock, flags);
return ret;
}
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
int i;
if (tx_rx == MTK_TX) {
for (i = 0; i < CLDMA_TXQ_NUM; i++)
t7xx_cldma_clear_txq(md_ctrl, i);
} else {
for (i = 0; i < CLDMA_RXQ_NUM; i++)
t7xx_cldma_clear_rxq(md_ctrl, i);
}
}
void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
unsigned long flags;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
if (tx_rx == MTK_RX)
md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
else
md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
struct sk_buff *skb)
{
struct cldma_ctrl *md_ctrl = queue->md_ctrl;
struct cldma_gpd *gpd = tx_req->gpd;
unsigned long flags;
/* Update GPD */
tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
dev_err(md_ctrl->dev, "DMA mapping failed\n");
return -ENOMEM;
}
t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
gpd->data_buff_len = cpu_to_le16(skb->len);
	/* This lock must cover the TGPD update: even without an explicit resume
	 * operation, CLDMA may start sending the next GPD as soon as HWO=1 is set
	 * if the previous TGPD has just finished.
	 */
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (md_ctrl->txq_active & BIT(queue->index))
gpd->flags |= GPD_FLAGS_HWO;
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
tx_req->skb = skb;
return 0;
}
/* Called with cldma_lock */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
struct cldma_request *prev_req)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
/* Check whether the device was powered off (CLDMA start address is not set) */
if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
t7xx_cldma_hw_init(hw_info);
t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
md_ctrl->txq_started &= ~BIT(qno);
}
if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
if (md_ctrl->txq_started & BIT(qno))
t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
else
t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);
md_ctrl->txq_started |= BIT(qno);
}
}
/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
* @md_ctrl: CLDMA context structure.
* @recv_skb: Receiving skb callback.
*/
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
md_ctrl->recv_skb = recv_skb;
}
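/* Editorial sketch, not part of the driver: registering an RX callback with
 * t7xx_cldma_set_recv_skb(). The callback here simply consumes every skb,
 * mirroring t7xx_cldma_default_recv_skb() below; the real driver registers
 * its port-proxy receive path instead (kept under #if 0 so it is never built).
 */
#if 0
static int example_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	/* The callback owns the skb. Returning 0 lets the RX collector keep
	 * going; a negative value pauses it (see t7xx_cldma_gpd_rx_from_q()).
	 */
	dev_kfree_skb_any(skb);
	return 0;
}

static void example_register_rx(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_set_recv_skb(md_ctrl, example_recv_skb);
}
#endif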
/**
* t7xx_cldma_send_skb() - Send control data to modem.
* @md_ctrl: CLDMA context structure.
* @qno: Queue number.
* @skb: Socket buffer.
*
* Return:
* * 0 - Success.
* * -ENOMEM - Allocation failure.
* * -EINVAL - Invalid queue request.
* * -EIO - Queue is not active.
* * -ETIMEDOUT - Timeout waiting for the device to wake up.
*/
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{
struct cldma_request *tx_req;
struct cldma_queue *queue;
unsigned long flags;
int ret;
if (qno >= CLDMA_TXQ_NUM)
return -EINVAL;
ret = pm_runtime_resume_and_get(md_ctrl->dev);
if (ret < 0 && ret != -EACCES)
return ret;
t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
queue = &md_ctrl->txq[qno];
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
if (!(md_ctrl->txq_active & BIT(qno))) {
ret = -EIO;
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
goto allow_sleep;
}
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
do {
spin_lock_irqsave(&queue->ring_lock, flags);
tx_req = queue->tx_next;
if (queue->budget > 0 && !tx_req->skb) {
struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;
queue->budget--;
t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
ret = -ETIMEDOUT;
break;
}
			/* Protect modem access for queue operations (resume/start)
			 * that touch state shared by all queues.
			 * cldma_lock is independent of ring_lock, which is per queue.
			 */
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
break;
}
spin_unlock_irqrestore(&queue->ring_lock, flags);
if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
ret = -ETIMEDOUT;
break;
}
if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
} while (!ret);
allow_sleep:
t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(md_ctrl->dev);
pm_runtime_put_autosuspend(md_ctrl->dev);
return ret;
}
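/* Editorial sketch, not part of the driver: a hypothetical caller handling the
 * return codes documented above. The queue number and the error handling are
 * assumptions for illustration only (kept under #if 0 so it is never built).
 */
#if 0
static int example_send_ctrl_msg(struct cldma_ctrl *md_ctrl, struct sk_buff *skb)
{
	int ret;

	ret = t7xx_cldma_send_skb(md_ctrl, 0 /* example queue */, skb);
	if (ret)	/* -EINVAL, -EIO, -ETIMEDOUT or a PM resume error */
		dev_err(md_ctrl->dev, "example control TX failed: %d\n", ret);

	return ret;
}
#endif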
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
int i, j, ret;
if (md_ctrl->is_late_init) {
dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
return -EALREADY;
}
snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);
md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
if (!md_ctrl->gpd_dmapool) {
dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
return -ENOMEM;
}
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
if (ret) {
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}
for (j = 0; j < CLDMA_RXQ_NUM; j++) {
md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
if (j == CLDMA_RXQ_NUM - 1)
md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
goto err_free_rx_ring;
}
}
for (i = 0; i < CLDMA_TXQ_NUM; i++)
t7xx_cldma_txq_init(&md_ctrl->txq[i]);
for (j = 0; j < CLDMA_RXQ_NUM; j++)
t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);
md_ctrl->is_late_init = true;
return 0;
err_free_rx_ring:
while (j--)
t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);
err_free_tx_ring:
while (i--)
t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);
return ret;
}
static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
return addr + phy_addr - addr_trs1;
}
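/* Editorial sketch, not part of the driver: the translation above is plain
 * pointer arithmetic, virt = mapped_base + (phy_addr - addr_trs1). The
 * numeric values below are invented purely to illustrate the offset math
 * (kept under #if 0 so it is never built).
 */
#if 0
static void __iomem *example_addr_transfer(void __iomem *ext_reg_base)
{
	/* With a translation base of 0x20000000, a physical register block at
	 * 0x21000000 lands 0x01000000 bytes into the mapped window.
	 */
	return t7xx_pcie_addr_transfer(ext_reg_base, 0x20000000, 0x21000000);
}
#endif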
static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
u32 phy_ao_base, phy_pd_base;
hw_info->hw_mode = MODE_BIT_64;
if (md_ctrl->hif_id == CLDMA_ID_MD) {
phy_ao_base = CLDMA1_AO_BASE;
phy_pd_base = CLDMA1_PD_BASE;
hw_info->phy_interrupt_id = CLDMA1_INT;
} else {
phy_ao_base = CLDMA0_AO_BASE;
phy_pd_base = CLDMA0_PD_BASE;
hw_info->phy_interrupt_id = CLDMA0_INT;
}
hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}
static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
return 0;
}
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
return -ENOMEM;
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
}
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
int qno_t;
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_restore(hw_info);
for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
MTK_TX);
t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
MTK_RX);
}
t7xx_cldma_enable_irq(md_ctrl);
t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
unsigned long flags;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
if (md_ctrl->hif_id == CLDMA_ID_MD)
t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);
return 0;
}
static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
t7xx_cldma_clear_ip_busy(hw_info);
t7xx_cldma_disable_irq(md_ctrl);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
struct cldma_ctrl *md_ctrl = entity_param;
struct t7xx_cldma_hw *hw_info;
unsigned long flags;
if (md_ctrl->hif_id == CLDMA_ID_MD)
t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
hw_info = &md_ctrl->hw_info;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
md_ctrl->txq_started = 0;
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
return 0;
}
static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
if (!md_ctrl->pm_entity)
return -ENOMEM;
md_ctrl->pm_entity->entity_param = md_ctrl;
if (md_ctrl->hif_id == CLDMA_ID_MD)
md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
else
md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;
md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
md_ctrl->pm_entity->resume = t7xx_cldma_resume;
md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;
return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}
static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
if (!md_ctrl->pm_entity)
return -EINVAL;
t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
kfree(md_ctrl->pm_entity);
md_ctrl->pm_entity = NULL;
return 0;
}
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
unsigned long flags;
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_stop(hw_info, MTK_TX);
t7xx_cldma_hw_stop(hw_info, MTK_RX);
t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
t7xx_cldma_hw_init(hw_info);
spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
struct cldma_ctrl *md_ctrl = data;
u32 interrupt;
interrupt = md_ctrl->hw_info.phy_interrupt_id;
t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
t7xx_cldma_irq_work_cb(md_ctrl);
t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
return IRQ_HANDLED;
}
static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{
int i;
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
if (md_ctrl->txq[i].worker) {
destroy_workqueue(md_ctrl->txq[i].worker);
md_ctrl->txq[i].worker = NULL;
}
}
for (i = 0; i < CLDMA_RXQ_NUM; i++) {
if (md_ctrl->rxq[i].worker) {
destroy_workqueue(md_ctrl->rxq[i].worker);
md_ctrl->rxq[i].worker = NULL;
}
}
}
/**
* t7xx_cldma_init() - Initialize CLDMA.
* @md_ctrl: CLDMA context structure.
*
* Allocate and initialize device power management entity.
* Initialize HIF TX/RX queue structure.
* Register CLDMA callback ISR with PCIe driver.
*
* Return:
* * 0 - Success.
 * * -ERROR - Error code from a failing sub-initialization.
*/
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
int ret, i;
md_ctrl->txq_active = 0;
md_ctrl->rxq_active = 0;
md_ctrl->is_late_init = false;
ret = t7xx_cldma_pm_init(md_ctrl);
if (ret)
return ret;
spin_lock_init(&md_ctrl->cldma_lock);
for (i = 0; i < CLDMA_TXQ_NUM; i++) {
md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
md_ctrl->txq[i].worker =
alloc_ordered_workqueue("md_hif%d_tx%d_worker",
WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
md_ctrl->hif_id, i);
if (!md_ctrl->txq[i].worker)
goto err_workqueue;
INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
}
for (i = 0; i < CLDMA_RXQ_NUM; i++) {
md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);
md_ctrl->rxq[i].worker =
alloc_ordered_workqueue("md_hif%d_rx%d_worker",
WQ_MEM_RECLAIM,
md_ctrl->hif_id, i);
if (!md_ctrl->rxq[i].worker)
goto err_workqueue;
}
t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
return 0;
err_workqueue:
t7xx_cldma_destroy_wqs(md_ctrl);
t7xx_cldma_pm_uninit(md_ctrl);
return -ENOMEM;
}
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
{
t7xx_cldma_late_release(md_ctrl);
t7xx_cldma_late_init(md_ctrl);
}
void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
t7xx_cldma_stop(md_ctrl);
t7xx_cldma_late_release(md_ctrl);
t7xx_cldma_destroy_wqs(md_ctrl);
t7xx_cldma_pm_uninit(md_ctrl);
}
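/* Editorial sketch, not part of the driver: the bring-up order implied by the
 * functions in this file (teardown mirrors it via t7xx_cldma_stop() and
 * t7xx_cldma_exit()). The calling context is an assumption and error
 * unwinding is trimmed for brevity (kept under #if 0 so it is never built).
 */
#if 0
static int example_cldma_bring_up(struct t7xx_pci_dev *t7xx_dev)
{
	struct cldma_ctrl *md_ctrl;
	int ret;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		return ret;

	md_ctrl = t7xx_dev->md->md_ctrl[CLDMA_ID_MD];
	ret = t7xx_cldma_init(md_ctrl);		/* PM entity, queues, ISR */
	if (ret)
		return ret;

	t7xx_cldma_switch_cfg(md_ctrl);		/* allocate the GPD rings */
	t7xx_cldma_hif_hw_init(md_ctrl);	/* quiesce and init the HW */
	t7xx_cldma_start(md_ctrl);		/* program addresses, enable IRQs */
	return 0;
}
#endif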
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_hif_cldma.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#define DPMAIF_BAT_COUNT 8192
#define DPMAIF_FRG_COUNT 4814
#define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2)
#define DPMAIF_BAT_CNT_THRESHOLD 30
#define DPMAIF_PIT_CNT_THRESHOLD 60
#define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0)
#define DPMAIF_NOTIFY_RELEASE_COUNT 128
#define DPMAIF_POLL_PIT_TIME_US 20
#define DPMAIF_POLL_PIT_MAX_TIME_US 2000
#define DPMAIF_WQ_TIME_LIMIT_MS 2
#define DPMAIF_CS_RESULT_PASS 0
/* Packet type */
#define DES_PT_PD 0
#define DES_PT_MSG 1
/* Buffer type */
#define PKT_BUF_FRAG 1
static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
{
u32 value;
value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
value <<= 13;
value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
return value;
}
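/* Editorial sketch, not part of the driver: how the buffer ID above is
 * reassembled. The low 13 bits come from the PIT header and the footer
 * supplies the upper bits; the numbers below are made up for illustration
 * (kept under #if 0 so it is never built).
 */
#if 0
static void example_pit_bid_math(void)
{
	/* PD_PIT_H_BID == 0x1 and PD_PIT_BUFFER_ID == 0x0123 give
	 * (0x1 << 13) + 0x0123 = 0x2123 as the full buffer ID.
	 */
	unsigned int bid = (0x1 << 13) + 0x0123;

	(void)bid;
}
#endif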
static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int q_num, const unsigned int bat_cnt)
{
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
struct dpmaif_bat_request *bat_req = rxq->bat_req;
unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
if (!rxq->que_started) {
dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
return -EINVAL;
}
old_rl_idx = bat_req->bat_release_rd_idx;
old_wr_idx = bat_req->bat_wr_idx;
new_wr_idx = old_wr_idx + bat_cnt;
if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
goto err_flow;
if (new_wr_idx >= bat_req->bat_size_cnt) {
new_wr_idx -= bat_req->bat_size_cnt;
if (new_wr_idx >= old_rl_idx)
goto err_flow;
}
bat_req->bat_wr_idx = new_wr_idx;
return 0;
err_flow:
dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
return -EINVAL;
}
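/* Editorial sketch, not part of the driver: the flow check above restated
 * against a generic ring of `size` entries, to make the wrap condition easier
 * to read. It is an equivalent restatement only (kept under #if 0 so it is
 * never built).
 */
#if 0
static bool example_ring_advance_ok(unsigned int size, unsigned int rd_idx,
				    unsigned int wr_idx, unsigned int cnt)
{
	unsigned int new_wr = wr_idx + cnt;

	if (rd_idx > wr_idx)		/* writer is already behind the reader */
		return new_wr < rd_idx;
	if (new_wr >= size)		/* the advance wraps past the end */
		return new_wr - size < rd_idx;
	return true;			/* plain advance, no wrap */
}
#endif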
static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int size, struct dpmaif_bat_skb *cur_skb)
{
dma_addr_t data_bus_addr;
struct sk_buff *skb;
skb = __dev_alloc_skb(size, GFP_KERNEL);
if (!skb)
return false;
data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
dev_kfree_skb_any(skb);
return false;
}
cur_skb->skb = skb;
cur_skb->data_bus_addr = data_bus_addr;
cur_skb->data_len = size;
return true;
}
static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
unsigned int index)
{
struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
if (bat_skb->skb) {
dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
dev_kfree_skb(bat_skb->skb);
bat_skb->skb = NULL;
}
}
/**
* t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
* @dpmaif_ctrl: Pointer to DPMAIF context structure.
* @bat_req: Pointer to BAT request structure.
* @q_num: Queue number.
* @buf_cnt: Number of buffers to allocate.
* @initial: Indicates if the ring is being populated for the first time.
*
* Allocate skb and store the start address of the data buffer into the BAT ring.
* If this is not the initial call, notify the HW about the new entries.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code.
*/
int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
const struct dpmaif_bat_request *bat_req,
const unsigned int q_num, const unsigned int buf_cnt,
const bool initial)
{
unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
int ret;
if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
return -EINVAL;
/* Check BAT buffer space */
bat_max_cnt = bat_req->bat_size_cnt;
bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
bat_req->bat_wr_idx, DPMAIF_WRITE);
if (buf_cnt > bat_cnt)
return -ENOMEM;
bat_start_idx = bat_req->bat_wr_idx;
for (i = 0; i < buf_cnt; i++) {
unsigned int cur_bat_idx = bat_start_idx + i;
struct dpmaif_bat_skb *cur_skb;
struct dpmaif_bat *cur_bat;
if (cur_bat_idx >= bat_max_cnt)
cur_bat_idx -= bat_max_cnt;
cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
if (!cur_skb->skb &&
!t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
break;
cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
}
if (!i)
return -ENOMEM;
ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
if (ret)
goto err_unmap_skbs;
if (!initial) {
unsigned int hw_wr_idx;
ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
if (ret)
goto err_unmap_skbs;
hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
DPF_RX_QNO_DFT);
if (hw_wr_idx != bat_req->bat_wr_idx) {
ret = -EFAULT;
dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
goto err_unmap_skbs;
}
}
return 0;
err_unmap_skbs:
	/* Roll back every buffer mapped above, including index 0 */
	while (i--)
		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
return ret;
}
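/* Editorial sketch, not part of the driver: the two ways this allocator is
 * used. At setup time the ring is populated with initial=true (leaving one
 * slot free so the write index never catches the release index is an
 * assumption of this example); runtime refills pass initial=false, which also
 * notifies the HW (see t7xx_dpmaif_bat_release_and_add() below). Kept under
 * #if 0 so it is never built.
 */
#if 0
static int example_initial_bat_fill(struct dpmaif_ctrl *dpmaif_ctrl,
				    struct dpmaif_bat_request *bat_req)
{
	return t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, bat_req, 0 /* queue */,
					bat_req->bat_size_cnt - 1, true);
}
#endif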
static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
const unsigned int rel_entry_num)
{
struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
int ret;
if (!rxq->que_started)
return 0;
if (rel_entry_num >= rxq->pit_size_cnt) {
dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
return -EINVAL;
}
old_rel_idx = rxq->pit_release_rd_idx;
new_rel_idx = old_rel_idx + rel_entry_num;
hw_wr_idx = rxq->pit_wr_idx;
if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
new_rel_idx -= rxq->pit_size_cnt;
ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
if (ret) {
dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
return ret;
}
rxq->pit_release_rd_idx = new_rel_idx;
return 0;
}
static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
{
unsigned long flags;
spin_lock_irqsave(&bat_req->mask_lock, flags);
set_bit(idx, bat_req->bat_bitmap);
spin_unlock_irqrestore(&bat_req->mask_lock, flags);
}
static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
const unsigned int cur_bid)
{
struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
struct dpmaif_bat_page *bat_page;
if (cur_bid >= DPMAIF_FRG_COUNT)
return -EINVAL;
bat_page = bat_frag->bat_skb + cur_bid;
if (!bat_page->page)
return -EINVAL;
return 0;
}
static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
unsigned int index)
{
struct dpmaif_bat_page *bat_page = bat_page_base + index;
if (bat_page->page) {
dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
put_page(bat_page->page);
bat_page->page = NULL;
}
}
/**
 * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
* @dpmaif_ctrl: Pointer to DPMAIF context structure.
* @bat_req: Pointer to BAT request structure.
* @buf_cnt: Number of buffers to allocate.
* @initial: Indicates if the ring is being populated for the first time.
*
* Fragment BAT is used when the received packet does not fit in a normal BAT entry.
* This function allocates a page fragment and stores the start address of the page
* into the Fragment BAT ring.
* If this is not the initial call, notify the HW about the new entries.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code.
*/
int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
const unsigned int buf_cnt, const bool initial)
{
unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
int ret = 0, i;
if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
return -EINVAL;
buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
DPMAIF_WRITE);
if (buf_cnt > buf_space) {
dev_err(dpmaif_ctrl->dev,
"Requested more buffers than the space available in RX frag ring\n");
return -EINVAL;
}
for (i = 0; i < buf_cnt; i++) {
struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
struct dpmaif_bat *cur_bat;
dma_addr_t data_base_addr;
if (!cur_page->page) {
unsigned long offset;
struct page *page;
void *data;
data = netdev_alloc_frag(bat_req->pkt_buf_sz);
if (!data)
break;
page = virt_to_head_page(data);
offset = data - page_address(page);
data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
put_page(virt_to_head_page(data));
dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
break;
}
cur_page->page = page;
cur_page->data_bus_addr = data_base_addr;
cur_page->offset = offset;
cur_page->data_len = bat_req->pkt_buf_sz;
}
data_base_addr = cur_page->data_bus_addr;
cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
}
bat_req->bat_wr_idx = cur_bat_idx;
if (!initial)
t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
if (i < buf_cnt) {
ret = -ENOMEM;
if (initial) {
			/* Roll back every page mapped above, including index 0 */
			while (i--)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
}
}
return ret;
}
static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *pkt_info,
struct sk_buff *skb)
{
unsigned long long data_bus_addr, data_base_addr;
struct device *dev = rxq->dpmaif_ctrl->dev;
struct dpmaif_bat_page *page_info;
unsigned int data_len;
int data_offset;
page_info = rxq->bat_frag->bat_skb;
page_info += t7xx_normal_pit_bid(pkt_info);
dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
if (!page_info->page)
return -EINVAL;
data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
data_base_addr = page_info->data_bus_addr;
data_offset = data_bus_addr - data_base_addr;
data_offset += page_info->offset;
data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
data_offset, data_len, page_info->data_len);
page_info->page = NULL;
page_info->offset = 0;
page_info->data_len = 0;
return 0;
}
static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *pkt_info,
const struct dpmaif_cur_rx_skb_info *skb_info)
{
unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
int ret;
ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
if (ret < 0)
return ret;
ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
if (ret < 0) {
dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
return ret;
}
t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
return 0;
}
static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
{
struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
bat_skb += cur_bid;
if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
return -EINVAL;
return 0;
}
static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
{
return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
}
static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *pit)
{
unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
return -EFAULT;
rxq->expect_pit_seq++;
if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
rxq->expect_pit_seq = 0;
return 0;
}
static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
{
unsigned int zero_index;
unsigned long flags;
spin_lock_irqsave(&bat_req->mask_lock, flags);
zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
bat_req->bat_release_rd_idx);
if (zero_index < bat_req->bat_size_cnt) {
spin_unlock_irqrestore(&bat_req->mask_lock, flags);
return zero_index - bat_req->bat_release_rd_idx;
}
	/* Limit the wrapped search to the entries before bat_release_rd_idx */
zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
spin_unlock_irqrestore(&bat_req->mask_lock, flags);
return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
}
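/* Editorial sketch, not part of the driver: what the bitmap scan above
 * returns. Bits are set when an entry's buffer has been handed to the stack
 * and cleared when it is released, so the run of set bits starting at
 * bat_release_rd_idx (wrapping once) is the number of releasable entries.
 * The numbers are made up for illustration (kept under #if 0 so it is never
 * built).
 */
#if 0
static void example_avail_cnt_math(void)
{
	/* Ring of 8, release index 6, bits 6,7,0,1 set, bit 2 clear:
	 * the scan wraps and reports 8 - 6 + 2 = 4 releasable entries.
	 */
	unsigned int avail = 8 - 6 + 2;

	(void)avail;
}
#endif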
static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
const unsigned int rel_entry_num,
const enum bat_type buf_type)
{
struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
struct dpmaif_bat_request *bat;
unsigned long flags;
if (!rxq->que_started || !rel_entry_num)
return -EINVAL;
if (buf_type == BAT_TYPE_FRAG) {
bat = rxq->bat_frag;
hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
} else {
bat = rxq->bat_req;
hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
}
if (rel_entry_num >= bat->bat_size_cnt)
return -EINVAL;
old_rel_idx = bat->bat_release_rd_idx;
new_rel_idx = old_rel_idx + rel_entry_num;
	/* Nothing to release if the queue is empty */
if (bat->bat_wr_idx == old_rel_idx)
return 0;
if (hw_rd_idx >= old_rel_idx) {
if (new_rel_idx > hw_rd_idx)
return -EINVAL;
}
if (new_rel_idx >= bat->bat_size_cnt) {
new_rel_idx -= bat->bat_size_cnt;
if (new_rel_idx > hw_rd_idx)
return -EINVAL;
}
spin_lock_irqsave(&bat->mask_lock, flags);
for (i = 0; i < rel_entry_num; i++) {
unsigned int index = bat->bat_release_rd_idx + i;
if (index >= bat->bat_size_cnt)
index -= bat->bat_size_cnt;
clear_bit(index, bat->bat_bitmap);
}
spin_unlock_irqrestore(&bat->mask_lock, flags);
bat->bat_release_rd_idx = new_rel_idx;
return rel_entry_num;
}
static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
{
int ret;
if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
return 0;
ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
if (ret)
return ret;
rxq->pit_remain_release_cnt = 0;
return 0;
}
static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
unsigned int bid_cnt;
int ret;
bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
return 0;
ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
if (ret <= 0) {
dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
return ret;
}
ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
if (ret < 0)
dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
return ret;
}
static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
unsigned int bid_cnt;
int ret;
bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
return 0;
ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
if (ret <= 0) {
dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
return ret;
}
return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
}
static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *msg_pit,
struct dpmaif_cur_rx_skb_info *skb_info)
{
int header = le32_to_cpu(msg_pit->header);
skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
}
static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *pkt_info,
struct dpmaif_cur_rx_skb_info *skb_info)
{
unsigned long long data_bus_addr, data_base_addr;
struct device *dev = rxq->dpmaif_ctrl->dev;
struct dpmaif_bat_skb *bat_skb;
unsigned int data_len;
struct sk_buff *skb;
int data_offset;
bat_skb = rxq->bat_req->bat_skb;
bat_skb += t7xx_normal_pit_bid(pkt_info);
dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
data_base_addr = bat_skb->data_bus_addr;
data_offset = data_bus_addr - data_base_addr;
data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
skb = bat_skb->skb;
skb->len = 0;
skb_reset_tail_pointer(skb);
skb_reserve(skb, data_offset);
if (skb->tail + data_len > skb->end) {
dev_err(dev, "No buffer space available\n");
return -ENOBUFS;
}
skb_put(skb, data_len);
skb_info->cur_skb = skb;
bat_skb->skb = NULL;
return 0;
}
static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
const struct dpmaif_pit *pkt_info,
struct dpmaif_cur_rx_skb_info *skb_info)
{
unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
int ret;
ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
if (ret < 0)
return ret;
ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
if (ret < 0) {
dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
return ret;
}
t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
return 0;
}
static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
{
struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
int ret;
queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
ret = t7xx_dpmaif_pit_release_and_add(rxq);
if (ret < 0)
dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
return ret;
}
static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
struct dpmaif_cur_rx_skb_info *skb_info)
{
struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
struct sk_buff *skb = skb_info->cur_skb;
struct t7xx_skb_cb *skb_cb;
u8 netif_id;
skb_info->cur_skb = NULL;
if (skb_info->pit_dp) {
dev_kfree_skb_any(skb);
return;
}
skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
CHECKSUM_NONE;
netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
skb_cb = T7XX_SKB_CB(skb);
skb_cb->netif_idx = netif_id;
skb_cb->rx_pkt_type = skb_info->pkt_type;
dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
}
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
const unsigned int budget, int *once_more)
{
unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
struct device *dev = rxq->dpmaif_ctrl->dev;
struct dpmaif_cur_rx_skb_info *skb_info;
int ret = 0;
pit_len = rxq->pit_size_cnt;
skb_info = &rxq->rx_data_info;
cur_pit = rxq->pit_rd_idx;
for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
struct dpmaif_pit *pkt_info;
u32 val;
if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
break;
pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
*once_more = 1;
return recv_skb_cnt;
}
val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
if (val == DES_PT_MSG) {
if (skb_info->msg_pit_received)
dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
skb_info->msg_pit_received = true;
t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
} else { /* DES_PT_PD */
val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
if (val != PKT_BUF_FRAG)
ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
else if (!skb_info->cur_skb)
ret = -EINVAL;
else
ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
if (ret < 0) {
skb_info->err_payload = 1;
dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
}
val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
if (!val) {
if (!skb_info->err_payload) {
t7xx_dpmaif_rx_skb(rxq, skb_info);
} else if (skb_info->cur_skb) {
dev_kfree_skb_any(skb_info->cur_skb);
skb_info->cur_skb = NULL;
}
memset(skb_info, 0, sizeof(*skb_info));
recv_skb_cnt++;
}
}
cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
rxq->pit_rd_idx = cur_pit;
rxq->pit_remain_release_cnt++;
if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
if (ret < 0)
break;
}
}
if (!ret)
ret = t7xx_dpmaifq_rx_notify_hw(rxq);
if (ret)
return ret;
return recv_skb_cnt;
}
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
{
unsigned int hw_wr_idx, pit_cnt;
if (!rxq->que_started)
return 0;
hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
DPMAIF_READ);
rxq->pit_wr_idx = hw_wr_idx;
return pit_cnt;
}
static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
const unsigned int q_num,
const unsigned int budget, int *once_more)
{
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
unsigned int cnt;
int ret = 0;
cnt = t7xx_dpmaifq_poll_pit(rxq);
if (!cnt)
return ret;
ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
if (ret < 0)
dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
return ret;
}
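/* NAPI poll: RX runs only while the device-side sleep lock is held. If the
 * lock cannot be taken right away, the poll completes with no work done and
 * reschedules itself rather than blocking in softirq context.
 */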
int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
{
struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
int ret, once_more = 0, work_done = 0;
atomic_set(&rxq->rx_processing, 1);
/* Ensure rx_processing is changed to 1 before actually begin RX flow */
smp_mb();
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
napi_complete_done(napi, work_done);
rxq->sleep_lock_pending = true;
napi_reschedule(napi);
return work_done;
}
rxq->sleep_lock_pending = false;
while (work_done < budget) {
int each_budget = budget - work_done;
int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
each_budget, &once_more);
if (rx_cnt > 0)
work_done += rx_cnt;
else
break;
}
if (once_more) {
napi_gro_flush(napi, false);
work_done = budget;
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
} else if (work_done < budget) {
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
struct dpmaif_ctrl *ctrl;
int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno);
return;
}
rxq = &dpmaif_ctrl->rxq[qno];
ctrl = rxq->dpmaif_ctrl;
	/* We need to make sure that the modem has been resumed before
	 * scheduling NAPI. This can't be done inside the polling function,
	 * as we could block waiting for the device to resume, which is not
	 * allowed in the softirq context the poll function runs in.
	 */
ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0 && ret != -EACCES) {
dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
return;
}
napi_schedule(&rxq->napi);
}
static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
const struct dpmaif_bat_request *bat_req)
{
if (bat_req->bat_base)
dma_free_coherent(dpmaif_ctrl->dev,
bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
bat_req->bat_base, bat_req->bat_bus_addr);
}
/**
* t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
* @dpmaif_ctrl: Pointer to DPMAIF context structure.
* @bat_req: Pointer to BAT request structure.
* @buf_type: BAT ring type.
*
 * This function allocates the BAT ring buffer shared with the HW device, and also allocates
 * a buffer used to store information about the BAT skbs for later release.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code.
*/
int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
const enum bat_type buf_type)
{
int sw_buf_size;
if (buf_type == BAT_TYPE_FRAG) {
sw_buf_size = sizeof(struct dpmaif_bat_page);
bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
} else {
sw_buf_size = sizeof(struct dpmaif_bat_skb);
bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
}
bat_req->type = buf_type;
bat_req->bat_wr_idx = 0;
bat_req->bat_release_rd_idx = 0;
bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
&bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
if (!bat_req->bat_base)
return -ENOMEM;
/* For AP SW to record skb information */
bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
GFP_KERNEL);
if (!bat_req->bat_skb)
goto err_free_dma_mem;
bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
if (!bat_req->bat_bitmap)
goto err_free_dma_mem;
spin_lock_init(&bat_req->mask_lock);
atomic_set(&bat_req->refcnt, 0);
return 0;
err_free_dma_mem:
t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
return -ENOMEM;
}
void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
{
if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
return;
bitmap_free(bat_req->bat_bitmap);
bat_req->bat_bitmap = NULL;
if (bat_req->bat_skb) {
unsigned int i;
for (i = 0; i < bat_req->bat_size_cnt; i++) {
if (bat_req->type == BAT_TYPE_FRAG)
t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
else
t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
}
}
t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
}
static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
{
rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
rxq->pit_rd_idx = 0;
rxq->pit_wr_idx = 0;
rxq->pit_release_rd_idx = 0;
rxq->expect_pit_seq = 0;
rxq->pit_remain_release_cnt = 0;
memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
&rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
if (!rxq->pit_base)
return -ENOMEM;
rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
atomic_inc(&rxq->bat_req->refcnt);
rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
atomic_inc(&rxq->bat_frag->refcnt);
return 0;
}
static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
{
if (!rxq->dpmaif_ctrl)
return;
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
if (rxq->pit_base)
dma_free_coherent(rxq->dpmaif_ctrl->dev,
rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
rxq->pit_base, rxq->pit_bus_addr);
}
int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
{
int ret;
ret = t7xx_dpmaif_rx_alloc(queue);
if (ret < 0)
dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
return ret;
}
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
t7xx_dpmaif_rx_buf_free(queue);
}
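/* Deferred BAT release work: runs in process context to release used BAT
 * entries and refill the normal and fragment BAT rings while device sleep
 * is disabled, so the HW can see the newly added buffers.
 */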
static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
{
struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
struct dpmaif_rx_queue *rxq;
int ret;
ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
if (ret < 0 && ret != -EACCES)
return;
t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	/* All RX queues share one BAT table, so choose DPF_RX_QNO_DFT */
rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
t7xx_dpmaif_bat_release_and_add(rxq);
t7xx_dpmaif_frag_bat_release_and_add(rxq);
}
t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
WQ_MEM_RECLAIM, 1);
if (!dpmaif_ctrl->bat_release_wq)
return -ENOMEM;
INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
return 0;
}
void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
flush_work(&dpmaif_ctrl->bat_release_work);
if (dpmaif_ctrl->bat_release_wq) {
destroy_workqueue(dpmaif_ctrl->bat_release_wq);
dpmaif_ctrl->bat_release_wq = NULL;
}
}
/**
* t7xx_dpmaif_rx_stop() - Suspend RX flow.
* @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
*
 * Wait for all the RX work to finish executing and mark the RX queues as paused.
*/
void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
unsigned int i;
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
int timeout, value;
timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
!value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
if (timeout)
dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
/* Ensure RX processing has stopped before we set rxq->que_started to false */
smp_mb();
rxq->que_started = false;
}
}
static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
{
int cnt, j = 0;
rxq->que_started = false;
do {
cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
rxq->pit_wr_idx, DPMAIF_READ);
if (++j >= DPMAIF_MAX_CHECK_COUNT) {
dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
break;
}
} while (cnt);
memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
rxq->pit_rd_idx = 0;
rxq->pit_wr_idx = 0;
rxq->pit_release_rd_idx = 0;
rxq->expect_pit_seq = 0;
rxq->pit_remain_release_cnt = 0;
rxq->bat_req->bat_release_rd_idx = 0;
rxq->bat_req->bat_wr_idx = 0;
rxq->bat_frag->bat_release_rd_idx = 0;
rxq->bat_frag->bat_wr_idx = 0;
}
void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_RXQ_NUM; i++)
t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#define FSM_DRM_DISABLE_DELAY_MS 200
#define FSM_EVENT_POLL_INTERVAL_MS 20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS 10000
#define FSM_MD_EX_PASS_TIMEOUT_MS 45000
#define FSM_CMD_TIMEOUT_MS 2000
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
unsigned long flags;
spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}
void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
unsigned long flags;
spin_lock_irqsave(&ctl->notifier_lock, flags);
list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
if (notifier_cur == notifier)
			list_del(&notifier->entry);
}
spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}
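/* Notify all registered listeners of a modem state change. The notifier lock
 * is dropped around each callback invocation, so callbacks are not called
 * with the spinlock held.
 */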
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
struct t7xx_fsm_notifier *notifier;
unsigned long flags;
spin_lock_irqsave(&ctl->notifier_lock, flags);
list_for_each_entry(notifier, &ctl->notifier_list, entry) {
spin_unlock_irqrestore(&ctl->notifier_lock, flags);
if (notifier->notifier_fn)
notifier->notifier_fn(state, notifier->data);
spin_lock_irqsave(&ctl->notifier_lock, flags);
}
spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}
void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
ctl->md_state = state;
/* Update to port first, otherwise sending message on HS2 may fail */
t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
fsm_state_notify(ctl->md, state);
}
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
*cmd->ret = result;
complete_all(cmd->done);
}
kfree(cmd);
}
static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
list_del(&event->entry);
kfree(event);
}
static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
struct t7xx_fsm_event *event, *evt_next;
struct t7xx_fsm_command *cmd, *cmd_next;
unsigned long flags;
spin_lock_irqsave(&ctl->command_lock, flags);
list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
list_del(&cmd->entry);
fsm_finish_command(ctl, cmd, -EINVAL);
}
spin_unlock_irqrestore(&ctl->command_lock, flags);
spin_lock_irqsave(&ctl->event_lock, flags);
list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
dev_warn(dev, "Unhandled event %d\n", event->event_id);
fsm_del_kf_event(event);
}
spin_unlock_irqrestore(&ctl->event_lock, flags);
}
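/* Poll the head of the event queue until @event_expected arrives; an
 * @event_ignore entry at the head is consumed and discarded. Gives up after
 * @retries polling intervals or when the thread is asked to stop.
 */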
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
enum t7xx_fsm_event_state event_ignore, int retries)
{
struct t7xx_fsm_event *event;
bool event_received = false;
unsigned long flags;
int cnt = 0;
while (cnt++ < retries && !event_received) {
bool sleep_required = true;
if (kthread_should_stop())
return;
spin_lock_irqsave(&ctl->event_lock, flags);
event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
if (event) {
event_received = event->event_id == event_expected;
if (event_received || event->event_id == event_ignore) {
fsm_del_kf_event(event);
sleep_required = false;
}
}
spin_unlock_irqrestore(&ctl->event_lock, flags);
if (sleep_required)
msleep(FSM_EVENT_POLL_INTERVAL_MS);
}
}
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
enum t7xx_ex_reason reason)
{
struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
if (cmd)
fsm_finish_command(ctl, cmd, -EINVAL);
return;
}
ctl->curr_state = FSM_STATE_EXCEPTION;
switch (reason) {
case EXCEPTION_HS_TIMEOUT:
dev_err(dev, "Boot Handshake failure\n");
break;
case EXCEPTION_EVENT:
dev_err(dev, "Exception event\n");
t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
t7xx_md_exception_handshake(ctl->md);
fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
break;
default:
dev_err(dev, "Exception %d\n", reason);
break;
}
if (cmd)
fsm_finish_command(ctl, cmd, 0);
}
static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
ctl->curr_state = FSM_STATE_STOPPED;
t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
return t7xx_md_reset(ctl->md->t7xx_dev);
}
static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
if (ctl->curr_state == FSM_STATE_STOPPED) {
fsm_finish_command(ctl, cmd, -EINVAL);
return;
}
fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_pci_dev *t7xx_dev;
struct cldma_ctrl *md_ctrl;
int err;
if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
fsm_finish_command(ctl, cmd, -EINVAL);
return;
}
md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
t7xx_dev = ctl->md->t7xx_dev;
ctl->curr_state = FSM_STATE_STOPPING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
t7xx_cldma_stop(md_ctrl);
if (!ctl->md->rgu_irq_asserted) {
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
/* Wait for the DRM disable to take effect */
msleep(FSM_DRM_DISABLE_DELAY_MS);
err = t7xx_acpi_fldr_func(t7xx_dev);
if (err)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
}
fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}
static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
return;
ctl->md_state = MD_STATE_READY;
fsm_state_notify(ctl->md, MD_STATE_READY);
t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}
static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
struct t7xx_modem *md = ctl->md;
ctl->curr_state = FSM_STATE_READY;
t7xx_fsm_broadcast_ready_state(ctl);
t7xx_md_event_notify(md, FSM_READY);
}
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
struct t7xx_modem *md = ctl->md;
struct device *dev;
ctl->curr_state = FSM_STATE_STARTING;
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
t7xx_md_event_notify(md, FSM_START);
wait_event_interruptible_timeout(ctl->async_hk_wq,
(md->core_md.ready && md->core_ap.ready) ||
ctl->exp_flg, HZ * 60);
dev = &md->t7xx_dev->pdev->dev;
if (ctl->exp_flg)
dev_err(dev, "MD exception is captured during handshake\n");
if (!md->core_md.ready) {
dev_err(dev, "MD handshake timeout\n");
if (md->core_md.handshake_ongoing)
t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
} else if (!md->core_ap.ready) {
dev_err(dev, "AP handshake timeout\n");
if (md->core_ap.handshake_ongoing)
t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);
fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
return -ETIMEDOUT;
}
t7xx_pci_pm_init_late(md->t7xx_dev);
fsm_routine_ready(ctl);
return 0;
}
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
struct t7xx_modem *md = ctl->md;
u32 dev_status;
int ret;
if (!md)
return;
if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
ctl->curr_state != FSM_STATE_STOPPED) {
fsm_finish_command(ctl, cmd, -EINVAL);
return;
}
ctl->curr_state = FSM_STATE_PRE_START;
t7xx_md_event_notify(md, FSM_PRE_START);
ret = read_poll_timeout(ioread32, dev_status,
(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
if (ret) {
struct device *dev = &md->t7xx_dev->pdev->dev;
fsm_finish_command(ctl, cmd, -ETIMEDOUT);
dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
return;
}
t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}
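/* Main FSM thread: dequeues one command at a time and dispatches it to the
 * matching routine; an unknown command is failed with -EINVAL and both the
 * command and event queues are flushed.
 */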
static int fsm_main_thread(void *data)
{
struct t7xx_fsm_ctl *ctl = data;
struct t7xx_fsm_command *cmd;
unsigned long flags;
while (!kthread_should_stop()) {
if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
kthread_should_stop()))
continue;
if (kthread_should_stop())
break;
spin_lock_irqsave(&ctl->command_lock, flags);
cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
list_del(&cmd->entry);
spin_unlock_irqrestore(&ctl->command_lock, flags);
switch (cmd->cmd_id) {
case FSM_CMD_START:
fsm_routine_start(ctl, cmd);
break;
case FSM_CMD_EXCEPTION:
fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
break;
case FSM_CMD_PRE_STOP:
fsm_routine_stopping(ctl, cmd);
break;
case FSM_CMD_STOP:
fsm_routine_stopped(ctl, cmd);
break;
default:
fsm_finish_command(ctl, cmd, -EINVAL);
fsm_flush_event_cmd_qs(ctl);
break;
}
}
return 0;
}
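/* Queue a command for the FSM thread. With FSM_CMD_FLAG_WAIT_FOR_COMPLETION
 * the caller blocks for up to FSM_CMD_TIMEOUT_MS and gets the routine's
 * result; otherwise the command is fire-and-forget. A minimal, hypothetical
 * caller might look like:
 *
 *	ret = t7xx_fsm_append_cmd(ctl, FSM_CMD_STOP,
 *				  FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
 *	if (ret)
 *		dev_err(dev, "Stop command failed: %d\n", ret);
 */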
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
DECLARE_COMPLETION_ONSTACK(done);
struct t7xx_fsm_command *cmd;
unsigned long flags;
int ret;
cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
if (!cmd)
return -ENOMEM;
INIT_LIST_HEAD(&cmd->entry);
cmd->cmd_id = cmd_id;
cmd->flag = flag;
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
cmd->done = &done;
cmd->ret = &ret;
}
spin_lock_irqsave(&ctl->command_lock, flags);
list_add_tail(&cmd->entry, &ctl->command_queue);
spin_unlock_irqrestore(&ctl->command_lock, flags);
wake_up(&ctl->command_wq);
if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
unsigned long wait_ret;
wait_ret = wait_for_completion_timeout(&done,
msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
if (!wait_ret)
return -ETIMEDOUT;
return ret;
}
return 0;
}
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
unsigned char *data, unsigned int length)
{
struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
struct t7xx_fsm_event *event;
unsigned long flags;
if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
dev_err(dev, "Invalid event %d\n", event_id);
return -EINVAL;
}
event = kmalloc(sizeof(*event) + length, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
if (!event)
return -ENOMEM;
INIT_LIST_HEAD(&event->entry);
event->event_id = event_id;
event->length = length;
if (data && length)
memcpy(event->data, data, length);
spin_lock_irqsave(&ctl->event_lock, flags);
list_add_tail(&event->entry, &ctl->event_queue);
spin_unlock_irqrestore(&ctl->event_lock, flags);
wake_up_all(&ctl->event_wq);
return 0;
}
void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
struct t7xx_fsm_event *event, *evt_next;
unsigned long flags;
spin_lock_irqsave(&ctl->event_lock, flags);
list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
if (event->event_id == event_id)
fsm_del_kf_event(event);
}
spin_unlock_irqrestore(&ctl->event_lock, flags);
}
enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
if (ctl)
return ctl->md_state;
return MD_STATE_INVALID;
}
unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
if (ctl)
return ctl->curr_state;
return FSM_STATE_STOPPED;
}
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;
if (type == MD_IRQ_PORT_ENUM) {
return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
} else if (type == MD_IRQ_CCIF_EX) {
ctl->exp_flg = true;
wake_up(&ctl->async_hk_wq);
cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
}
return -EINVAL;
}
void t7xx_fsm_reset(struct t7xx_modem *md)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
fsm_flush_event_cmd_qs(ctl);
ctl->curr_state = FSM_STATE_STOPPED;
ctl->exp_flg = false;
}
int t7xx_fsm_init(struct t7xx_modem *md)
{
struct device *dev = &md->t7xx_dev->pdev->dev;
struct t7xx_fsm_ctl *ctl;
ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return -ENOMEM;
md->fsm_ctl = ctl;
ctl->md = md;
ctl->curr_state = FSM_STATE_INIT;
INIT_LIST_HEAD(&ctl->command_queue);
INIT_LIST_HEAD(&ctl->event_queue);
init_waitqueue_head(&ctl->async_hk_wq);
init_waitqueue_head(&ctl->event_wq);
INIT_LIST_HEAD(&ctl->notifier_list);
init_waitqueue_head(&ctl->command_wq);
spin_lock_init(&ctl->event_lock);
spin_lock_init(&ctl->command_lock);
ctl->exp_flg = false;
spin_lock_init(&ctl->notifier_lock);
ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}
void t7xx_fsm_uninit(struct t7xx_modem *md)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
if (!ctl)
return;
if (ctl->fsm_thread)
kthread_stop(ctl->fsm_thread);
fsm_flush_event_cmd_qs(ctl);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_state_monitor.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#define RT_ID_MD_PORT_ENUM 0
#define RT_ID_AP_PORT_ENUM 1
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID 0x49434343
#define FEATURE_VER GENMASK(7, 4)
#define FEATURE_MSK GENMASK(3, 0)
#define RGU_RESET_DELAY_MS 10
#define PORT_RESET_DELAY_MS 2000
#define EX_HS_TIMEOUT_MS 5000
#define EX_HS_POLL_DELAY_MS 10
enum mtk_feature_support_type {
MTK_FEATURE_DOES_NOT_EXIST,
MTK_FEATURE_NOT_SUPPORTED,
MTK_FEATURE_MUST_BE_SUPPORTED,
};
static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}
/**
* t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
* @t7xx_dev: MTK device.
*
* Check the interrupt status and queue commands accordingly.
*
 * Return:
 * * 0 - Success.
 * * -EINVAL - Failure to get FSM control.
*/
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_modem *md = t7xx_dev->md;
struct t7xx_fsm_ctl *ctl;
unsigned int int_sta;
int ret = 0;
u32 mask;
ctl = md->fsm_ctl;
if (!ctl) {
dev_err_ratelimited(&t7xx_dev->pdev->dev,
"MHCCIF interrupt received before initializing MD monitor\n");
return -EINVAL;
}
spin_lock_bh(&md->exp_lock);
int_sta = t7xx_get_interrupt_status(t7xx_dev);
md->exp_id |= int_sta;
if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
if (ctl->md_state == MD_STATE_INVALID ||
ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
ctl->md_state == MD_STATE_READY) {
md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
}
} else if (md->exp_id & D2H_INT_PORT_ENUM) {
md->exp_id &= ~D2H_INT_PORT_ENUM;
if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
ctl->curr_state == FSM_STATE_STOPPED)
ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
mask = t7xx_mhccif_mask_get(t7xx_dev);
if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
queue_work(md->handshake_wq, &md->handshake_work);
}
}
spin_unlock_bh(&md->exp_lock);
return ret;
}
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
void __iomem *reset_pcie_reg;
u32 val;
reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
pbase_addr->pcie_dev_reg_trsl_addr;
val = ioread32(reset_pcie_reg);
iowrite32(val, reset_pcie_reg);
}
void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
/* Clear L2 */
t7xx_clr_device_irq_via_pcie(t7xx_dev);
/* Clear L1 */
t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
struct device *dev = &t7xx_dev->pdev->dev;
acpi_status acpi_ret;
acpi_handle handle;
handle = ACPI_HANDLE(dev);
if (!handle) {
dev_err(dev, "ACPI handle not found\n");
return -EFAULT;
}
if (!acpi_has_method(handle, fn_name)) {
dev_err(dev, "%s method not found\n", fn_name);
return -EFAULT;
}
acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
if (ACPI_FAILURE(acpi_ret)) {
dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
return -EFAULT;
}
kfree(buffer.pointer);
#endif
return 0;
}
int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
return t7xx_acpi_reset(t7xx_dev, "_RST");
}
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
u32 val;
val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
if (val & MISC_RESET_TYPE_PLDR)
t7xx_acpi_reset(t7xx_dev, "MRST._RST");
else if (val & MISC_RESET_TYPE_FLDR)
t7xx_acpi_fldr_func(t7xx_dev);
}
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
msleep(RGU_RESET_DELAY_MS);
t7xx_reset_device_via_pmic(t7xx_dev);
return IRQ_HANDLED;
}
static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
struct t7xx_pci_dev *t7xx_dev = data;
struct t7xx_modem *modem;
t7xx_clear_rgu_irq(t7xx_dev);
if (!t7xx_dev->rgu_pci_irq_en)
return IRQ_HANDLED;
modem = t7xx_dev->md;
modem->rgu_irq_asserted = true;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
return IRQ_WAKE_THREAD;
}
static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
/* Registers RGU callback ISR with PCIe driver */
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}
/**
* t7xx_cldma_exception() - CLDMA exception handler.
* @md_ctrl: modem control struct.
* @stage: exception stage.
*
* Part of the modem exception recovery.
 * Stages follow one after the other, as described below:
* HIF_EX_INIT: Disable and clear TXQ.
* HIF_EX_CLEARQ_DONE: Disable RX, flush TX/RX workqueues and clear RX.
* HIF_EX_ALLQ_RESET: HW is back in safe mode for re-initialization and restart.
*/
/* Modem Exception Handshake Flow
*
* Modem HW Exception interrupt received
* (MD_IRQ_CCIF_EX)
* |
* +---------v--------+
* | HIF_EX_INIT | : Disable and clear TXQ
* +------------------+
* |
* +---------v--------+
* | HIF_EX_INIT_DONE | : Wait for the init to be done
* +------------------+
* |
* +---------v--------+
* |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
* +------------------+ : Flush TX/RX workqueues
* |
* +---------v--------+
* |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
* +------------------+
*/
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
switch (stage) {
case HIF_EX_INIT:
t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
break;
case HIF_EX_CLEARQ_DONE:
/* We do not want to get CLDMA IRQ when MD is
* resetting CLDMA after it got clearq_ack.
*/
t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
t7xx_cldma_stop(md_ctrl);
if (md_ctrl->hif_id == CLDMA_ID_MD)
t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);
t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
break;
case HIF_EX_ALLQ_RESET:
t7xx_cldma_hw_init(&md_ctrl->hw_info);
t7xx_cldma_start(md_ctrl);
break;
default:
break;
}
}
static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;
if (stage == HIF_EX_CLEARQ_DONE) {
/* Give DHL time to flush data */
msleep(PORT_RESET_DELAY_MS);
t7xx_port_proxy_reset(md->port_prox);
}
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);
t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_AP], stage);
if (stage == HIF_EX_INIT)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
else if (stage == HIF_EX_CLEARQ_DONE)
t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
unsigned int waited_time_ms = 0;
do {
if (md->exp_id & event_id)
return 0;
waited_time_ms += EX_HS_POLL_DELAY_MS;
msleep(EX_HS_POLL_DELAY_MS);
} while (waited_time_ms < EX_HS_TIMEOUT_MS);
return -EFAULT;
}
static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
/* Register the MHCCIF ISR for MD exception, port enum and
* async handshake notifications.
*/
t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);
/* Register RGU IRQ handler for sAP exception notification */
t7xx_dev->rgu_pci_irq_en = true;
t7xx_pcie_register_rgu_isr(t7xx_dev);
}
struct feature_query {
__le32 head_pattern;
u8 feature_set[FEATURE_COUNT];
__le32 tail_pattern;
};
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
struct feature_query *ft_query;
struct sk_buff *skb;
skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
if (!skb)
return;
ft_query = skb_put(skb, sizeof(*ft_query));
ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
/* Send HS1 message to device */
t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}
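/* Build the HS3 reply from the device's feature query: each feature the
 * device did not flag as must-be-supported gets a runtime feature entry, and
 * features reported as non-existent echo back the device's support value.
 */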
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
void *data)
{
struct feature_query *md_feature = data;
struct mtk_runtime_feature *rt_feature;
unsigned int i, rt_data_len = 0;
struct sk_buff *skb;
/* Parse MD runtime data query */
if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
le32_to_cpu(md_feature->head_pattern),
le32_to_cpu(md_feature->tail_pattern));
return -EINVAL;
}
for (i = 0; i < FEATURE_COUNT; i++) {
if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
MTK_FEATURE_MUST_BE_SUPPORTED)
rt_data_len += sizeof(*rt_feature);
}
skb = t7xx_ctrl_alloc_skb(rt_data_len);
if (!skb)
return -ENOMEM;
rt_feature = skb_put(skb, rt_data_len);
memset(rt_feature, 0, rt_data_len);
/* Fill runtime feature */
for (i = 0; i < FEATURE_COUNT; i++) {
u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);
if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
continue;
rt_feature->feature_id = i;
if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
rt_feature->support_info = md_feature->feature_set[i];
rt_feature++;
}
/* Send HS3 message to device */
t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
return 0;
}
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
struct device *dev, void *data, int data_length)
{
enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
struct mtk_runtime_feature *rt_feature;
int i, offset;
offset = sizeof(struct feature_query);
for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
rt_feature = data + offset;
offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);
ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
continue;
ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
return -EINVAL;
if (i == RT_ID_MD_PORT_ENUM || i == RT_ID_AP_PORT_ENUM)
t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
}
return 0;
}
static int t7xx_core_reset(struct t7xx_modem *md)
{
struct device *dev = &md->t7xx_dev->pdev->dev;
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
md->core_md.ready = false;
if (!ctl) {
dev_err(dev, "FSM is not initialized\n");
return -EINVAL;
}
if (md->core_md.handshake_ongoing) {
int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);
if (ret)
return ret;
}
md->core_md.handshake_ongoing = false;
return 0;
}
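/* Core handshake: send the HS1 feature query, wait for the HS2 response event
 * (aborting on @err_detect or an exception), parse the device's runtime data
 * and answer with HS3 before marking the core as ready.
 */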
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_sys_info *core_info,
struct t7xx_fsm_ctl *ctl,
enum t7xx_fsm_event_state event_id,
enum t7xx_fsm_event_state err_detect)
{
struct t7xx_fsm_event *event = NULL, *event_next;
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned long flags;
int ret;
t7xx_prepare_host_rt_data_query(core_info);
while (!kthread_should_stop()) {
bool event_received = false;
spin_lock_irqsave(&ctl->event_lock, flags);
list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
if (event->event_id == err_detect) {
list_del(&event->entry);
spin_unlock_irqrestore(&ctl->event_lock, flags);
dev_err(dev, "Core handshake error event received\n");
goto err_free_event;
} else if (event->event_id == event_id) {
list_del(&event->entry);
event_received = true;
break;
}
}
spin_unlock_irqrestore(&ctl->event_lock, flags);
if (event_received)
break;
wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
kthread_should_stop());
if (kthread_should_stop())
goto err_free_event;
}
if (!event || ctl->exp_flg)
goto err_free_event;
ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
if (ret) {
dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
goto err_free_event;
}
if (ctl->exp_flg)
goto err_free_event;
ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
if (ret) {
		dev_err(dev, "Failed to prepare device runtime data: %d\n", ret);
goto err_free_event;
}
core_info->ready = true;
core_info->handshake_ongoing = false;
wake_up(&ctl->async_hk_wq);
err_free_event:
kfree(event);
}
static void t7xx_md_hk_wq(struct work_struct *work)
{
struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	/* Clear the HS2 EXIT event appended in t7xx_core_reset() */
t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
md->core_md.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}
static void t7xx_ap_hk_wq(struct work_struct *work)
{
struct t7xx_modem *md = container_of(work, struct t7xx_modem, ap_handshake_work);
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
/* Clear the HS2 EXIT event appended in t7xx_core_reset(). */
t7xx_fsm_clr_event(ctl, FSM_EVENT_AP_HS2_EXIT);
t7xx_cldma_stop(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_start(md->md_ctrl[CLDMA_ID_AP]);
md->core_ap.handshake_ongoing = true;
t7xx_core_hk_handler(md, &md->core_ap, ctl, FSM_EVENT_AP_HS2, FSM_EVENT_AP_HS2_EXIT);
}
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
unsigned int int_sta;
unsigned long flags;
switch (evt_id) {
case FSM_PRE_START:
t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM | D2H_INT_ASYNC_MD_HK |
D2H_INT_ASYNC_AP_HK);
break;
case FSM_START:
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);
spin_lock_irqsave(&md->exp_lock, flags);
int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
md->exp_id |= int_sta;
if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
ctl->exp_flg = true;
md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else if (ctl->exp_flg) {
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
} else {
void __iomem *mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
queue_work(md->handshake_wq, &md->handshake_work);
md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
}
if (md->exp_id & D2H_INT_ASYNC_AP_HK) {
queue_work(md->handshake_wq, &md->ap_handshake_work);
md->exp_id &= ~D2H_INT_ASYNC_AP_HK;
iowrite32(D2H_INT_ASYNC_AP_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
}
}
spin_unlock_irqrestore(&md->exp_lock, flags);
t7xx_mhccif_mask_clr(md->t7xx_dev,
D2H_INT_EXCEPTION_INIT |
D2H_INT_EXCEPTION_INIT_DONE |
D2H_INT_EXCEPTION_CLEARQ_DONE |
D2H_INT_EXCEPTION_ALLQ_RESET);
break;
case FSM_READY:
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_AP_HK);
break;
default:
break;
}
}
void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
struct device *dev = &md->t7xx_dev->pdev->dev;
int ret;
t7xx_md_exception(md, HIF_EX_INIT);
ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
if (ret)
dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);
t7xx_md_exception(md, HIF_EX_INIT_DONE);
ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
if (ret)
dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);
t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
if (ret)
dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);
t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}
static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct t7xx_modem *md;
md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
if (!md)
return NULL;
md->t7xx_dev = t7xx_dev;
t7xx_dev->md = md;
spin_lock_init(&md->exp_lock);
md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
0, "md_hk_wq");
if (!md->handshake_wq)
return NULL;
INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
INIT_WORK(&md->ap_handshake_work, t7xx_ap_hk_wq);
md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] &= ~FEATURE_MSK;
md->core_ap.feature_set[RT_ID_AP_PORT_ENUM] |=
FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
return md;
}
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_modem *md = t7xx_dev->md;
md->md_init_finish = false;
md->exp_id = 0;
t7xx_fsm_reset(md);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_AP]);
t7xx_port_proxy_reset(md->port_prox);
md->md_init_finish = true;
return t7xx_core_reset(md);
}
/**
* t7xx_md_init() - Initialize modem.
* @t7xx_dev: MTK device.
*
* Allocate and initialize MD control block, and initialize data path.
* Register MHCCIF ISR and RGU ISR, and start the state machine.
*
* Return:
 * * 0 - Success.
 * * -ENOMEM - Allocation failure.
*/
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_modem *md;
int ret;
md = t7xx_md_alloc(t7xx_dev);
if (!md)
return -ENOMEM;
ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
if (ret)
goto err_destroy_hswq;
ret = t7xx_cldma_alloc(CLDMA_ID_AP, t7xx_dev);
if (ret)
goto err_destroy_hswq;
ret = t7xx_fsm_init(md);
if (ret)
goto err_destroy_hswq;
ret = t7xx_ccmni_init(t7xx_dev);
if (ret)
goto err_uninit_fsm;
ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
if (ret)
goto err_uninit_ccmni;
ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_AP]);
if (ret)
goto err_uninit_md_cldma;
ret = t7xx_port_proxy_init(md);
if (ret)
goto err_uninit_ap_cldma;
ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
if (ret) /* t7xx_fsm_uninit() flushes cmd queue */
goto err_uninit_proxy;
t7xx_md_sys_sw_init(t7xx_dev);
md->md_init_finish = true;
return 0;
err_uninit_proxy:
t7xx_port_proxy_uninit(md->port_prox);
err_uninit_ap_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
err_uninit_md_cldma:
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
err_uninit_ccmni:
t7xx_ccmni_exit(t7xx_dev);
err_uninit_fsm:
t7xx_fsm_uninit(md);
err_destroy_hswq:
destroy_workqueue(md->handshake_wq);
dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
return ret;
}
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_modem *md = t7xx_dev->md;
t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
if (!md->md_init_finish)
return;
t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
t7xx_port_proxy_uninit(md->port_prox);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_AP]);
t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
t7xx_ccmni_exit(t7xx_dev);
t7xx_fsm_uninit(md);
destroy_workqueue(md->handshake_wq);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_modem_ops.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Chandrashekar Devegowda <[email protected]>
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/wwan.h>
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
static int t7xx_port_ctrl_start(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
if (atomic_read(&port_mtk->usage_cnt))
return -EBUSY;
atomic_inc(&port_mtk->usage_cnt);
return 0;
}
static void t7xx_port_ctrl_stop(struct wwan_port *port)
{
struct t7xx_port *port_mtk = wwan_port_get_drvdata(port);
atomic_dec(&port_mtk->usage_cnt);
}
static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct t7xx_port *port_private = wwan_port_get_drvdata(port);
const struct t7xx_port_conf *port_conf;
struct sk_buff *cur = skb, *cloned;
struct t7xx_fsm_ctl *ctl;
enum md_state md_state;
int cnt = 0, ret;
if (!port_private->chan_enable)
return -EINVAL;
port_conf = port_private->port_conf;
ctl = port_private->t7xx_dev->md->fsm_ctl;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state == MD_STATE_WAITING_FOR_HS1 || md_state == MD_STATE_WAITING_FOR_HS2) {
dev_warn(port_private->dev, "Cannot write to %s port when md_state=%d\n",
port_conf->name, md_state);
return -ENODEV;
}
while (cur) {
		cloned = skb_clone(cur, GFP_KERNEL);
		if (!cloned)
			return cnt ? cnt : -ENOMEM;
		cloned->len = skb_headlen(cur);
ret = t7xx_port_send_skb(port_private, cloned, 0, 0);
if (ret) {
dev_kfree_skb(cloned);
dev_err(port_private->dev, "Write error on %s port, %d\n",
port_conf->name, ret);
return cnt ? cnt + ret : ret;
}
cnt += cur->len;
if (cur == skb)
cur = skb_shinfo(skb)->frag_list;
else
cur = cur->next;
}
dev_kfree_skb(skb);
return 0;
}
static const struct wwan_port_ops wwan_ops = {
.start = t7xx_port_ctrl_start,
.stop = t7xx_port_ctrl_stop,
.tx = t7xx_port_ctrl_tx,
};
static int t7xx_port_wwan_init(struct t7xx_port *port)
{
port->rx_length_th = RX_QUEUE_MAXLEN;
return 0;
}
static void t7xx_port_wwan_uninit(struct t7xx_port *port)
{
if (!port->wwan.wwan_port)
return;
port->rx_length_th = 0;
wwan_remove_port(port->wwan.wwan_port);
port->wwan.wwan_port = NULL;
}
static int t7xx_port_wwan_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
{
if (!atomic_read(&port->usage_cnt) || !port->chan_enable) {
const struct t7xx_port_conf *port_conf = port->port_conf;
dev_kfree_skb_any(skb);
dev_err_ratelimited(port->dev, "Port %s is not opened, drop packets\n",
port_conf->name);
		/* Dropping the skb; the caller must not access it afterwards. */
return 0;
}
wwan_port_rx(port->wwan.wwan_port, skb);
return 0;
}
static int t7xx_port_wwan_enable_chl(struct t7xx_port *port)
{
spin_lock(&port->port_update_lock);
port->chan_enable = true;
spin_unlock(&port->port_update_lock);
return 0;
}
static int t7xx_port_wwan_disable_chl(struct t7xx_port *port)
{
spin_lock(&port->port_update_lock);
port->chan_enable = false;
spin_unlock(&port->port_update_lock);
return 0;
}
static void t7xx_port_wwan_md_state_notify(struct t7xx_port *port, unsigned int state)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
unsigned int header_len = sizeof(struct ccci_header);
struct wwan_port_caps caps;
if (state != MD_STATE_READY)
return;
if (!port->wwan.wwan_port) {
caps.frag_len = CLDMA_MTU - header_len;
caps.headroom_len = header_len;
port->wwan.wwan_port = wwan_create_port(port->dev, port_conf->port_type,
&wwan_ops, &caps, port);
if (IS_ERR(port->wwan.wwan_port))
			dev_err(port->dev, "Unable to create WWAN port %s", port_conf->name);
}
}
struct port_ops wwan_sub_port_ops = {
.init = t7xx_port_wwan_init,
.recv_skb = t7xx_port_wwan_recv_skb,
.uninit = t7xx_port_wwan_uninit,
.enable_chl = t7xx_port_wwan_enable_chl,
.disable_chl = t7xx_port_wwan_disable_chl,
.md_state_notify = t7xx_port_wwan_md_state_notify,
};
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_port_wwan.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2022 Intel Corporation.
*/
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/skbuff.h>
#include <linux/wwan.h>
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
#define T7XX_TRC_SUB_BUFF_SIZE 131072
#define T7XX_TRC_N_SUB_BUFF 32
static struct dentry *t7xx_trace_create_buf_file_handler(const char *filename,
struct dentry *parent,
umode_t mode,
struct rchan_buf *buf,
int *is_global)
{
*is_global = 1;
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
{
debugfs_remove(dentry);
return 0;
}
static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
void *prev_subbuf, size_t prev_padding)
{
if (relay_buf_full(buf)) {
		pr_err_ratelimited("Relay buffer full, dropping traces\n");
return 0;
}
return 1;
}
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = t7xx_trace_subbuf_start_handler,
.create_buf_file = t7xx_trace_create_buf_file_handler,
.remove_buf_file = t7xx_trace_remove_buf_file_handler,
};
static void t7xx_trace_port_uninit(struct t7xx_port *port)
{
struct dentry *debugfs_dir = port->t7xx_dev->debugfs_dir;
struct rchan *relaych = port->log.relaych;
if (!relaych)
return;
relay_close(relaych);
debugfs_remove_recursive(debugfs_dir);
}
static int t7xx_trace_port_recv_skb(struct t7xx_port *port, struct sk_buff *skb)
{
struct rchan *relaych = port->log.relaych;
if (!relaych)
return -EINVAL;
relay_write(relaych, skb->data, skb->len);
dev_kfree_skb(skb);
return 0;
}
static void t7xx_port_trace_md_state_notify(struct t7xx_port *port, unsigned int state)
{
struct rchan *relaych = port->log.relaych;
struct dentry *debugfs_wwan_dir;
struct dentry *debugfs_dir;
if (state != MD_STATE_READY || relaych)
return;
debugfs_wwan_dir = wwan_get_debugfs_dir(port->dev);
if (IS_ERR(debugfs_wwan_dir))
return;
debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, debugfs_wwan_dir);
if (IS_ERR_OR_NULL(debugfs_dir)) {
wwan_put_debugfs_dir(debugfs_wwan_dir);
dev_err(port->dev, "Unable to create debugfs for trace");
return;
}
relaych = relay_open("relay_ch", debugfs_dir, T7XX_TRC_SUB_BUFF_SIZE,
T7XX_TRC_N_SUB_BUFF, &relay_callbacks, NULL);
if (!relaych)
goto err_rm_debugfs_dir;
wwan_put_debugfs_dir(debugfs_wwan_dir);
port->log.relaych = relaych;
port->t7xx_dev->debugfs_dir = debugfs_dir;
return;
err_rm_debugfs_dir:
debugfs_remove_recursive(debugfs_dir);
wwan_put_debugfs_dir(debugfs_wwan_dir);
dev_err(port->dev, "Unable to create trace port %s", port->port_conf->name);
}
struct port_ops t7xx_trace_port_ops = {
.recv_skb = t7xx_trace_port_recv_skb,
.uninit = t7xx_trace_port_uninit,
.md_state_notify = t7xx_port_trace_md_state_notify,
};
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_port_trace.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Andy Shevchenko <[email protected]>
* Chandrashekar Devegowda <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/wwan.h>
#include "t7xx_hif_cldma.h"
#include "t7xx_modem_ops.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
#define Q_IDX_CTRL 0
#define Q_IDX_MBIM 2
#define Q_IDX_AT_CMD 5
#define INVALID_SEQ_NUM GENMASK(15, 0)
#define for_each_proxy_port(i, p, proxy) \
for (i = 0, (p) = &(proxy)->ports[i]; \
i < (proxy)->port_count; \
i++, (p) = &(proxy)->ports[i])
static const struct t7xx_port_conf t7xx_port_conf[] = {
{
.tx_ch = PORT_CH_UART2_TX,
.rx_ch = PORT_CH_UART2_RX,
.txq_index = Q_IDX_AT_CMD,
.rxq_index = Q_IDX_AT_CMD,
.txq_exp_index = 0xff,
.rxq_exp_index = 0xff,
.path_id = CLDMA_ID_MD,
.ops = &wwan_sub_port_ops,
.name = "AT",
.port_type = WWAN_PORT_AT,
}, {
.tx_ch = PORT_CH_MBIM_TX,
.rx_ch = PORT_CH_MBIM_RX,
.txq_index = Q_IDX_MBIM,
.rxq_index = Q_IDX_MBIM,
.path_id = CLDMA_ID_MD,
.ops = &wwan_sub_port_ops,
.name = "MBIM",
.port_type = WWAN_PORT_MBIM,
}, {
#ifdef CONFIG_WWAN_DEBUGFS
.tx_ch = PORT_CH_MD_LOG_TX,
.rx_ch = PORT_CH_MD_LOG_RX,
.txq_index = 7,
.rxq_index = 7,
.txq_exp_index = 7,
.rxq_exp_index = 7,
.path_id = CLDMA_ID_MD,
.ops = &t7xx_trace_port_ops,
.name = "mdlog",
}, {
#endif
.tx_ch = PORT_CH_CONTROL_TX,
.rx_ch = PORT_CH_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
.rxq_index = Q_IDX_CTRL,
.path_id = CLDMA_ID_MD,
.ops = &ctl_port_ops,
.name = "t7xx_ctrl",
}, {
.tx_ch = PORT_CH_AP_CONTROL_TX,
.rx_ch = PORT_CH_AP_CONTROL_RX,
.txq_index = Q_IDX_CTRL,
.rxq_index = Q_IDX_CTRL,
.path_id = CLDMA_ID_AP,
.ops = &ctl_port_ops,
.name = "t7xx_ap_ctrl",
},
};
static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
const struct t7xx_port_conf *port_conf;
struct t7xx_port *port;
int i;
for_each_proxy_port(i, port, port_prox) {
port_conf = port->port_conf;
if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
return port;
}
return NULL;
}
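/* Return the next expected RX sequence number, warning (rate limited) when the
 * received sequence number does not match the one currently tracked for the
 * port.
 */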
static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
{
u32 status = le32_to_cpu(ccci_h->status);
u16 seq_num, next_seq_num;
bool assert_bit;
seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
assert_bit = status & CCCI_H_AST_BIT;
if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
return next_seq_num;
if (seq_num != port->seq_nums[MTK_RX])
dev_warn_ratelimited(port->dev,
"seq num out-of-order %u != %u (header %X, len %X)\n",
seq_num, port->seq_nums[MTK_RX],
le32_to_cpu(ccci_h->packet_header),
le32_to_cpu(ccci_h->packet_len));
return next_seq_num;
}
void t7xx_port_proxy_reset(struct port_proxy *port_prox)
{
struct t7xx_port *port;
int i;
for_each_proxy_port(i, port, port_prox) {
port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
port->seq_nums[MTK_TX] = 0;
}
}
static int t7xx_port_get_queue_no(struct t7xx_port *port)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
port_conf->txq_exp_index : port_conf->txq_index;
}
static void t7xx_port_struct_init(struct t7xx_port *port)
{
INIT_LIST_HEAD(&port->entry);
INIT_LIST_HEAD(&port->queue_entry);
skb_queue_head_init(&port->rx_skb_list);
init_waitqueue_head(&port->rx_wq);
port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
port->seq_nums[MTK_TX] = 0;
atomic_set(&port->usage_cnt, 0);
}
struct sk_buff *t7xx_port_alloc_skb(int payload)
{
struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);
if (skb)
skb_reserve(skb, sizeof(struct ccci_header));
return skb;
}
struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
{
struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));
if (skb)
skb_reserve(skb, sizeof(struct ctrl_msg_header));
return skb;
}
/**
* t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
* @port: port context.
* @skb: received skb.
*
* Return:
* * 0 - Success.
* * -ENOBUFS - Not enough buffer space. Caller will try again later, skb is not consumed.
*/
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
{
unsigned long flags;
spin_lock_irqsave(&port->rx_wq.lock, flags);
if (port->rx_skb_list.qlen >= port->rx_length_th) {
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
return -ENOBUFS;
}
__skb_queue_tail(&port->rx_skb_list, skb);
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
wake_up_all(&port->rx_wq);
return 0;
}
static int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
enum cldma_id path_id = port->port_conf->path_id;
struct cldma_ctrl *md_ctrl;
int ret, tx_qno;
md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
tx_qno = t7xx_port_get_queue_no(port);
ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
if (ret)
dev_err(port->dev, "Failed to send skb: %d\n", ret);
return ret;
}
static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
unsigned int pkt_header, unsigned int ex_msg)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
struct ccci_header *ccci_h;
u32 status;
int ret;
ccci_h = skb_push(skb, sizeof(*ccci_h));
status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
ccci_h->status = cpu_to_le32(status);
ccci_h->packet_header = cpu_to_le32(pkt_header);
ccci_h->packet_len = cpu_to_le32(skb->len);
ccci_h->ex_msg = cpu_to_le32(ex_msg);
ret = t7xx_port_send_raw_skb(port, skb);
if (ret)
return ret;
port->seq_nums[MTK_TX]++;
return 0;
}
int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
unsigned int ex_msg)
{
struct ctrl_msg_header *ctrl_msg_h;
unsigned int msg_len = skb->len;
u32 pkt_header = 0;
ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
ctrl_msg_h->data_length = cpu_to_le32(msg_len);
if (!msg_len)
pkt_header = CCCI_HEADER_NO_DATA;
return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}
int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
unsigned int ex_msg)
{
struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
unsigned int fsm_state;
fsm_state = t7xx_fsm_get_ctl_state(ctl);
if (fsm_state != FSM_STATE_PRE_START) {
const struct t7xx_port_conf *port_conf = port->port_conf;
enum md_state md_state = t7xx_fsm_get_md_state(ctl);
switch (md_state) {
case MD_STATE_EXCEPTION:
if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
return -EBUSY;
break;
case MD_STATE_WAITING_FOR_HS1:
case MD_STATE_WAITING_FOR_HS2:
case MD_STATE_STOPPED:
case MD_STATE_WAITING_TO_STOP:
case MD_STATE_INVALID:
return -ENODEV;
default:
break;
}
}
return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}
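/* Build the RX dispatch tables: link each port into the list for its RX
* channel ID and into the per-CLDMA-path, per-RX-queue list.
*/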
static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
{
struct t7xx_port *port;
int i, j;
for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);
for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
}
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
enum cldma_id path_id = port_conf->path_id;
u8 ch_id;
ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
list_add_tail(&port->queue_entry,
&port_prox->queue_ports[path_id][port_conf->rxq_index]);
}
}
static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
struct cldma_queue *queue, u16 channel)
{
struct port_proxy *port_prox = t7xx_dev->md->port_prox;
struct list_head *port_list;
struct t7xx_port *port;
u8 ch_id;
ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
port_list = &port_prox->rx_ch_ports[ch_id];
list_for_each_entry(port, port_list, entry) {
const struct t7xx_port_conf *port_conf = port->port_conf;
if (queue->md_ctrl->hif_id == port_conf->path_id &&
channel == port_conf->rx_ch)
return port;
}
return NULL;
}
/**
* t7xx_port_proxy_recv_skb() - Dispatch received skb.
* @queue: CLDMA queue.
* @skb: Socket buffer.
*
* Return:
* * 0 - Packet consumed.
* * -ERROR - Failed to process skb.
*/
static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
struct device *dev = queue->md_ctrl->dev;
const struct t7xx_port_conf *port_conf;
struct t7xx_port *port;
u16 seq_num, channel;
int ret;
channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
goto drop_skb;
}
port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
if (!port) {
dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
goto drop_skb;
}
seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
port_conf = port->port_conf;
skb_pull(skb, sizeof(*ccci_h));
ret = port_conf->ops->recv_skb(port, skb);
/* A non-zero return means the skb was not consumed; the caller will retry later */
if (ret) {
skb_push(skb, sizeof(*ccci_h));
return ret;
}
port->seq_nums[MTK_RX] = seq_num;
return 0;
drop_skb:
dev_kfree_skb_any(skb);
return 0;
}
/**
* t7xx_port_proxy_md_status_notify() - Notify all ports of state.
* @port_prox: The port_proxy pointer.
* @state: State.
*
* Called by t7xx_fsm. Used to dispatch the modem state to all ports
* that need to know about MD state transitions.
*/
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
{
struct t7xx_port *port;
int i;
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
if (port_conf->ops->md_state_notify)
port_conf->ops->md_state_notify(port, state);
}
}
static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
{
struct port_proxy *port_prox = md->port_prox;
struct t7xx_port *port;
int i;
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
t7xx_port_struct_init(port);
if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
md->core_md.ctl_port = port;
if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
md->core_ap.ctl_port = port;
port->t7xx_dev = md->t7xx_dev;
port->dev = &md->t7xx_dev->pdev->dev;
spin_lock_init(&port->port_update_lock);
port->chan_enable = false;
if (port_conf->ops->init)
port_conf->ops->init(port);
}
t7xx_proxy_setup_ch_mapping(port_prox);
}
static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
unsigned int port_count = ARRAY_SIZE(t7xx_port_conf);
struct device *dev = &md->t7xx_dev->pdev->dev;
struct port_proxy *port_prox;
int i;
port_prox = devm_kzalloc(dev, sizeof(*port_prox) + sizeof(struct t7xx_port) * port_count,
GFP_KERNEL);
if (!port_prox)
return -ENOMEM;
md->port_prox = port_prox;
port_prox->dev = dev;
for (i = 0; i < port_count; i++)
port_prox->ports[i].port_conf = &t7xx_port_conf[i];
port_prox->port_count = port_count;
t7xx_proxy_init_all_ports(md);
return 0;
}
/**
* t7xx_port_proxy_init() - Initialize ports.
* @md: Modem.
*
* Create all port instances.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code from failure sub-initializations.
*/
int t7xx_port_proxy_init(struct t7xx_modem *md)
{
int ret;
ret = t7xx_proxy_alloc(md);
if (ret)
return ret;
t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
return 0;
}
void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
{
struct t7xx_port *port;
int i;
for_each_proxy_port(i, port, port_prox) {
const struct t7xx_port_conf *port_conf = port->port_conf;
if (port_conf->ops->uninit)
port_conf->ops->uninit(port);
}
}
int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
bool en_flag)
{
struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
const struct t7xx_port_conf *port_conf;
if (!port)
return -EINVAL;
port_conf = port->port_conf;
if (en_flag) {
if (port_conf->ops->enable_chl)
port_conf->ops->enable_chl(port);
} else {
if (port_conf->ops->disable_chl)
port_conf->ops->disable_chl(port);
}
return 0;
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_port_proxy.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_state_monitor.h"
unsigned int t7xx_ring_buf_get_next_wr_idx(unsigned int buf_len, unsigned int buf_idx)
{
buf_idx++;
return buf_idx < buf_len ? buf_idx : 0;
}
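/* Number of ring entries available for the given operation: for a read this
* is the count of filled entries between rd_idx and wr_idx, for a write it is
* the count of free entries (one slot is kept empty to tell full from empty).
*/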
unsigned int t7xx_ring_buf_rd_wr_count(unsigned int total_cnt, unsigned int rd_idx,
unsigned int wr_idx, enum dpmaif_rdwr rd_wr)
{
int pkt_cnt;
if (rd_wr == DPMAIF_READ)
pkt_cnt = wr_idx - rd_idx;
else
pkt_cnt = rd_idx - wr_idx - 1;
if (pkt_cnt < 0)
pkt_cnt += total_cnt;
return (unsigned int)pkt_cnt;
}
static void t7xx_dpmaif_enable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
int i;
for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
}
}
static void t7xx_dpmaif_disable_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
int i;
for (i = 0; i < ARRAY_SIZE(dpmaif_ctrl->isr_para); i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
}
}
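/* Threaded-IRQ bottom half: read the pending DPMAIF interrupt sources and
* dispatch them: TX done, RX done and the DL length-error conditions.
*/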
static void t7xx_dpmaif_irq_cb(struct dpmaif_isr_para *isr_para)
{
struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
struct dpmaif_hw_intr_st_para intr_status;
struct device *dev = dpmaif_ctrl->dev;
struct dpmaif_hw_info *hw_info;
int i;
memset(&intr_status, 0, sizeof(intr_status));
hw_info = &dpmaif_ctrl->hw_info;
if (t7xx_dpmaif_hw_get_intr_cnt(hw_info, &intr_status, isr_para->dlq_id) < 0) {
dev_err(dev, "Failed to get HW interrupt count\n");
return;
}
t7xx_pcie_mac_clear_int_status(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
for (i = 0; i < intr_status.intr_cnt; i++) {
switch (intr_status.intr_types[i]) {
case DPF_INTR_UL_DONE:
t7xx_dpmaif_irq_tx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
break;
case DPF_INTR_UL_DRB_EMPTY:
case DPF_INTR_UL_MD_NOTREADY:
case DPF_INTR_UL_MD_PWR_NOTREADY:
/* No need to log an error for these */
break;
case DPF_INTR_DL_BATCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: packet BAT count length error\n");
t7xx_dpmaif_dl_unmask_batcnt_len_err_intr(hw_info);
break;
case DPF_INTR_DL_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: PIT count length error\n");
t7xx_dpmaif_dl_unmask_pitcnt_len_err_intr(hw_info);
break;
case DPF_INTR_DL_Q0_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: DLQ0 PIT count length error\n");
t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO_DFT);
break;
case DPF_INTR_DL_Q1_PITCNT_LEN_ERR:
dev_err_ratelimited(dev, "DL interrupt: DLQ1 PIT count length error\n");
t7xx_dpmaif_dlq_unmask_pitcnt_len_err_intr(hw_info, DPF_RX_QNO1);
break;
case DPF_INTR_DL_DONE:
case DPF_INTR_DL_Q0_DONE:
case DPF_INTR_DL_Q1_DONE:
t7xx_dpmaif_irq_rx_done(dpmaif_ctrl, intr_status.intr_queues[i]);
break;
default:
dev_err_ratelimited(dev, "DL interrupt error: unknown type : %d\n",
intr_status.intr_types[i]);
}
}
}
static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
{
struct dpmaif_isr_para *isr_para = data;
struct dpmaif_ctrl *dpmaif_ctrl;
dpmaif_ctrl = isr_para->dpmaif_ctrl;
if (dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
dev_err(dpmaif_ctrl->dev, "Interrupt received before initializing DPMAIF\n");
return IRQ_HANDLED;
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_WAKE_THREAD;
}
static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
{
struct dpmaif_isr_para *isr_para = data;
struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
}
static void t7xx_dpmaif_isr_parameter_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_isr_para *isr_para;
unsigned char i;
dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO0] = DPMAIF_INT;
dpmaif_ctrl->rxq_int_mapping[DPF_RX_QNO1] = DPMAIF2_INT;
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
isr_para->dpmaif_ctrl = dpmaif_ctrl;
isr_para->dlq_id = i;
isr_para->pcie_int = dpmaif_ctrl->rxq_int_mapping[i];
}
}
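/* Hook the DPMAIF RX-queue interrupts into the PCIe MAC dispatch tables:
* one top-half handler and one threaded handler per RX queue.
*/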
static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct t7xx_pci_dev *t7xx_dev = dpmaif_ctrl->t7xx_dev;
struct dpmaif_isr_para *isr_para;
enum t7xx_int int_type;
int i;
t7xx_dpmaif_isr_parameter_init(dpmaif_ctrl);
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
isr_para = &dpmaif_ctrl->isr_para[i];
int_type = isr_para->pcie_int;
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
t7xx_pcie_mac_set_int(t7xx_dev, int_type);
}
}
static int t7xx_dpmaif_rxtx_sw_allocs(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rx_q;
struct dpmaif_tx_queue *tx_q;
int ret, rx_idx, tx_idx, i;
ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, BAT_TYPE_NORMAL);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate normal BAT table: %d\n", ret);
return ret;
}
ret = t7xx_dpmaif_bat_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, BAT_TYPE_FRAG);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate frag BAT table: %d\n", ret);
goto err_free_normal_bat;
}
for (rx_idx = 0; rx_idx < DPMAIF_RXQ_NUM; rx_idx++) {
rx_q = &dpmaif_ctrl->rxq[rx_idx];
rx_q->index = rx_idx;
rx_q->dpmaif_ctrl = dpmaif_ctrl;
ret = t7xx_dpmaif_rxq_init(rx_q);
if (ret)
goto err_free_rxq;
}
for (tx_idx = 0; tx_idx < DPMAIF_TXQ_NUM; tx_idx++) {
tx_q = &dpmaif_ctrl->txq[tx_idx];
tx_q->index = tx_idx;
tx_q->dpmaif_ctrl = dpmaif_ctrl;
ret = t7xx_dpmaif_txq_init(tx_q);
if (ret)
goto err_free_txq;
}
ret = t7xx_dpmaif_tx_thread_init(dpmaif_ctrl);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to start TX thread\n");
goto err_free_txq;
}
ret = t7xx_dpmaif_bat_rel_wq_alloc(dpmaif_ctrl);
if (ret)
goto err_thread_rel;
return 0;
err_thread_rel:
t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
err_free_txq:
for (i = 0; i < tx_idx; i++) {
tx_q = &dpmaif_ctrl->txq[i];
t7xx_dpmaif_txq_free(tx_q);
}
err_free_rxq:
for (i = 0; i < rx_idx; i++) {
rx_q = &dpmaif_ctrl->rxq[i];
t7xx_dpmaif_rxq_free(rx_q);
}
t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_frag);
err_free_normal_bat:
t7xx_dpmaif_bat_free(dpmaif_ctrl, &dpmaif_ctrl->bat_req);
return ret;
}
static void t7xx_dpmaif_sw_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rx_q;
struct dpmaif_tx_queue *tx_q;
int i;
t7xx_dpmaif_tx_thread_rel(dpmaif_ctrl);
t7xx_dpmaif_bat_wq_rel(dpmaif_ctrl);
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
tx_q = &dpmaif_ctrl->txq[i];
t7xx_dpmaif_txq_free(tx_q);
}
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
rx_q = &dpmaif_ctrl->rxq[i];
t7xx_dpmaif_rxq_free(rx_q);
}
}
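/* Bring the data path up: pre-fill the normal and fragment BAT rings, program
* the HW with the ring base addresses and sizes, clear stale interrupts and
* wake the TX push thread.
*/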
static int t7xx_dpmaif_start(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
struct dpmaif_hw_params hw_init_para;
struct dpmaif_rx_queue *rxq;
struct dpmaif_tx_queue *txq;
unsigned int buf_cnt;
int i, ret = 0;
if (dpmaif_ctrl->state == DPMAIF_STATE_PWRON)
return -EFAULT;
memset(&hw_init_para, 0, sizeof(hw_init_para));
for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
rxq = &dpmaif_ctrl->rxq[i];
rxq->que_started = true;
rxq->index = i;
rxq->budget = rxq->bat_req->bat_size_cnt - 1;
hw_init_para.pkt_bat_base_addr[i] = rxq->bat_req->bat_bus_addr;
hw_init_para.pkt_bat_size_cnt[i] = rxq->bat_req->bat_size_cnt;
hw_init_para.pit_base_addr[i] = rxq->pit_bus_addr;
hw_init_para.pit_size_cnt[i] = rxq->pit_size_cnt;
hw_init_para.frg_bat_base_addr[i] = rxq->bat_frag->bat_bus_addr;
hw_init_para.frg_bat_size_cnt[i] = rxq->bat_frag->bat_size_cnt;
}
bitmap_zero(dpmaif_ctrl->bat_req.bat_bitmap, dpmaif_ctrl->bat_req.bat_size_cnt);
buf_cnt = dpmaif_ctrl->bat_req.bat_size_cnt - 1;
ret = t7xx_dpmaif_rx_buf_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_req, 0, buf_cnt, true);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate RX buffer: %d\n", ret);
return ret;
}
buf_cnt = dpmaif_ctrl->bat_frag.bat_size_cnt - 1;
ret = t7xx_dpmaif_rx_frag_alloc(dpmaif_ctrl, &dpmaif_ctrl->bat_frag, buf_cnt, true);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to allocate frag RX buffer: %d\n", ret);
goto err_free_normal_bat;
}
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
txq = &dpmaif_ctrl->txq[i];
txq->que_started = true;
hw_init_para.drb_base_addr[i] = txq->drb_bus_addr;
hw_init_para.drb_size_cnt[i] = txq->drb_size_cnt;
}
ret = t7xx_dpmaif_hw_init(hw_info, &hw_init_para);
if (ret) {
dev_err(dpmaif_ctrl->dev, "Failed to initialize DPMAIF HW: %d\n", ret);
goto err_free_frag_bat;
}
ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(hw_info, rxq->bat_req->bat_size_cnt - 1);
if (ret)
goto err_free_frag_bat;
ret = t7xx_dpmaif_dl_snd_hw_frg_cnt(hw_info, rxq->bat_frag->bat_size_cnt - 1);
if (ret)
goto err_free_frag_bat;
t7xx_dpmaif_ul_clr_all_intr(hw_info);
t7xx_dpmaif_dl_clr_all_intr(hw_info);
dpmaif_ctrl->state = DPMAIF_STATE_PWRON;
t7xx_dpmaif_enable_irq(dpmaif_ctrl);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
err_free_frag_bat:
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
err_free_normal_bat:
t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
return ret;
}
static void t7xx_dpmaif_stop_sw(struct dpmaif_ctrl *dpmaif_ctrl)
{
t7xx_dpmaif_tx_stop(dpmaif_ctrl);
t7xx_dpmaif_rx_stop(dpmaif_ctrl);
}
static void t7xx_dpmaif_stop_hw(struct dpmaif_ctrl *dpmaif_ctrl)
{
t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
}
static int t7xx_dpmaif_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (!dpmaif_ctrl->dpmaif_sw_init_done) {
dev_err(dpmaif_ctrl->dev, "dpmaif SW init fail\n");
return -EFAULT;
}
if (dpmaif_ctrl->state == DPMAIF_STATE_PWROFF)
return -EFAULT;
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
dpmaif_ctrl->state = DPMAIF_STATE_PWROFF;
t7xx_dpmaif_stop_sw(dpmaif_ctrl);
t7xx_dpmaif_tx_clear(dpmaif_ctrl);
t7xx_dpmaif_rx_clear(dpmaif_ctrl);
return 0;
}
static int t7xx_dpmaif_suspend(struct t7xx_pci_dev *t7xx_dev, void *param)
{
struct dpmaif_ctrl *dpmaif_ctrl = param;
t7xx_dpmaif_tx_stop(dpmaif_ctrl);
t7xx_dpmaif_hw_stop_all_txq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_hw_stop_all_rxq(&dpmaif_ctrl->hw_info);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
t7xx_dpmaif_rx_stop(dpmaif_ctrl);
return 0;
}
static void t7xx_dpmaif_unmask_dlq_intr(struct dpmaif_ctrl *dpmaif_ctrl)
{
int qno;
for (qno = 0; qno < DPMAIF_RXQ_NUM; qno++)
t7xx_dpmaif_dlq_unmask_rx_done(&dpmaif_ctrl->hw_info, qno);
}
static void t7xx_dpmaif_start_txrx_qs(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_rx_queue *rxq;
struct dpmaif_tx_queue *txq;
unsigned int que_cnt;
for (que_cnt = 0; que_cnt < DPMAIF_TXQ_NUM; que_cnt++) {
txq = &dpmaif_ctrl->txq[que_cnt];
txq->que_started = true;
}
for (que_cnt = 0; que_cnt < DPMAIF_RXQ_NUM; que_cnt++) {
rxq = &dpmaif_ctrl->rxq[que_cnt];
rxq->que_started = true;
}
}
static int t7xx_dpmaif_resume(struct t7xx_pci_dev *t7xx_dev, void *param)
{
struct dpmaif_ctrl *dpmaif_ctrl = param;
if (!dpmaif_ctrl)
return 0;
t7xx_dpmaif_start_txrx_qs(dpmaif_ctrl);
t7xx_dpmaif_enable_irq(dpmaif_ctrl);
t7xx_dpmaif_unmask_dlq_intr(dpmaif_ctrl);
t7xx_dpmaif_start_hw(&dpmaif_ctrl->hw_info);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
}
static int t7xx_dpmaif_pm_entity_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
int ret;
INIT_LIST_HEAD(&dpmaif_pm_entity->entity);
dpmaif_pm_entity->suspend = &t7xx_dpmaif_suspend;
dpmaif_pm_entity->suspend_late = NULL;
dpmaif_pm_entity->resume_early = NULL;
dpmaif_pm_entity->resume = &t7xx_dpmaif_resume;
dpmaif_pm_entity->id = PM_ENTITY_ID_DATA;
dpmaif_pm_entity->entity_param = dpmaif_ctrl;
ret = t7xx_pci_pm_entity_register(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
if (ret)
dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
return ret;
}
static int t7xx_dpmaif_pm_entity_release(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct md_pm_entity *dpmaif_pm_entity = &dpmaif_ctrl->dpmaif_pm_entity;
int ret;
ret = t7xx_pci_pm_entity_unregister(dpmaif_ctrl->t7xx_dev, dpmaif_pm_entity);
if (ret < 0)
dev_err(dpmaif_ctrl->dev, "dpmaif register pm_entity fail\n");
return ret;
}
int t7xx_dpmaif_md_state_callback(struct dpmaif_ctrl *dpmaif_ctrl, enum md_state state)
{
int ret = 0;
switch (state) {
case MD_STATE_WAITING_FOR_HS1:
ret = t7xx_dpmaif_start(dpmaif_ctrl);
break;
case MD_STATE_EXCEPTION:
ret = t7xx_dpmaif_stop(dpmaif_ctrl);
break;
case MD_STATE_STOPPED:
ret = t7xx_dpmaif_stop(dpmaif_ctrl);
break;
case MD_STATE_WAITING_TO_STOP:
t7xx_dpmaif_stop_hw(dpmaif_ctrl);
break;
default:
break;
}
return ret;
}
/**
* t7xx_dpmaif_hif_init() - Initialize data path.
* @t7xx_dev: MTK context structure.
* @callbacks: Callbacks implemented by the network layer to handle RX skb and
* event notifications.
*
* Allocate and initialize datapath control block.
* Register datapath ISR, TX and RX resources.
*
* Return:
* * dpmaif_ctrl pointer - Pointer to DPMAIF context structure.
* * NULL - In case of error.
*/
struct dpmaif_ctrl *t7xx_dpmaif_hif_init(struct t7xx_pci_dev *t7xx_dev,
struct dpmaif_callbacks *callbacks)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct dpmaif_ctrl *dpmaif_ctrl;
int ret;
if (!callbacks)
return NULL;
dpmaif_ctrl = devm_kzalloc(dev, sizeof(*dpmaif_ctrl), GFP_KERNEL);
if (!dpmaif_ctrl)
return NULL;
dpmaif_ctrl->t7xx_dev = t7xx_dev;
dpmaif_ctrl->callbacks = callbacks;
dpmaif_ctrl->dev = dev;
dpmaif_ctrl->dpmaif_sw_init_done = false;
dpmaif_ctrl->hw_info.dev = dev;
dpmaif_ctrl->hw_info.pcie_base = t7xx_dev->base_addr.pcie_ext_reg_base -
t7xx_dev->base_addr.pcie_dev_reg_trsl_addr;
ret = t7xx_dpmaif_pm_entity_init(dpmaif_ctrl);
if (ret)
return NULL;
t7xx_dpmaif_register_pcie_irq(dpmaif_ctrl);
t7xx_dpmaif_disable_irq(dpmaif_ctrl);
ret = t7xx_dpmaif_rxtx_sw_allocs(dpmaif_ctrl);
if (ret) {
t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
dev_err(dev, "Failed to allocate RX/TX SW resources: %d\n", ret);
return NULL;
}
dpmaif_ctrl->dpmaif_sw_init_done = true;
return dpmaif_ctrl;
}
void t7xx_dpmaif_hif_exit(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (dpmaif_ctrl->dpmaif_sw_init_done) {
t7xx_dpmaif_stop(dpmaif_ctrl);
t7xx_dpmaif_pm_entity_release(dpmaif_ctrl);
t7xx_dpmaif_sw_release(dpmaif_ctrl);
dpmaif_ctrl->dpmaif_sw_init_done = false;
}
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Chandrashekar Devegowda <[email protected]>
* Haijun Liu <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Andy Shevchenko <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/ipv6.h>
#include <net/pkt_sched.h>
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
#define IP_MUX_SESSION_DEFAULT 0
#define SBD_PACKET_TYPE_MASK GENMASK(7, 4)
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
struct dpmaif_ctrl *ctrl;
int i, ret;
ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
/* The usage count has to be bumped every time before calling
* napi_schedule. It will be decreased in the poll routine,
* right after napi_complete_done is called.
*/
ret = pm_runtime_resume_and_get(ctrl->dev);
if (ret < 0) {
dev_err(ctrl->dev, "Failed to resume device: %d\n",
ret);
return;
}
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
ctlb->is_napi_en = true;
}
static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
int i;
if (!ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
napi_synchronize(ctlb->napi[i]);
napi_disable(ctlb->napi[i]);
}
ctlb->is_napi_en = false;
}
static int t7xx_ccmni_open(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
t7xx_ccmni_enable_napi(ccmni_ctl);
atomic_inc(&ccmni->usage);
return 0;
}
static int t7xx_ccmni_close(struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;
atomic_dec(&ccmni->usage);
if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
t7xx_ccmni_disable_napi(ccmni_ctl);
netif_carrier_off(dev);
netif_tx_disable(dev);
return 0;
}
static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
unsigned int txq_number)
{
struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
skb_cb->netif_idx = ccmni->index;
if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
return NETDEV_TX_BUSY;
return 0;
}
static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
int skb_len = skb->len;
/* If MTU is changed or there is no headroom, drop the packet */
if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
return NETDEV_TX_BUSY;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb_len;
return NETDEV_TX_OK;
}
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
dev->stats.tx_errors++;
if (atomic_read(&ccmni->usage) > 0)
netif_tx_wake_all_queues(dev);
}
static const struct net_device_ops ccmni_netdev_ops = {
.ndo_open = t7xx_ccmni_open,
.ndo_stop = t7xx_ccmni_close,
.ndo_start_xmit = t7xx_ccmni_start_xmit,
.ndo_tx_timeout = t7xx_ccmni_tx_timeout,
};
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
struct t7xx_ccmni *ccmni;
int i;
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
continue;
if (atomic_read(&ccmni->usage) > 0) {
netif_tx_start_all_queues(ccmni->dev);
netif_carrier_on(ccmni->dev);
}
}
if (atomic_read(&ctlb->napi_usr_refcnt))
t7xx_ccmni_enable_napi(ctlb);
}
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
struct t7xx_ccmni *ccmni;
int i;
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
continue;
if (atomic_read(&ccmni->usage) > 0)
netif_tx_disable(ccmni->dev);
}
}
static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
struct t7xx_ccmni *ccmni;
int i;
if (atomic_read(&ctlb->napi_usr_refcnt))
t7xx_ccmni_disable_napi(ctlb);
for (i = 0; i < ctlb->nic_dev_num; i++) {
ccmni = ctlb->ccmni_inst[i];
if (!ccmni)
continue;
if (atomic_read(&ccmni->usage) > 0)
netif_carrier_off(ccmni->dev);
}
}
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
dev->needed_headroom += sizeof(struct ccci_header);
dev->mtu = ETH_DATA_LEN;
dev->max_mtu = CCMNI_MTU_MAX;
BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->features = NETIF_F_VLAN_CHALLENGED;
dev->features |= NETIF_F_SG;
dev->hw_features |= NETIF_F_SG;
dev->features |= NETIF_F_HW_CSUM;
dev->hw_features |= NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GRO;
dev->hw_features |= NETIF_F_GRO;
dev->needs_free_netdev = true;
dev->type = ARPHRD_NONE;
dev->netdev_ops = &ccmni_netdev_ops;
}
static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
int i;
/* One HW instance is shared by multiple net devices,
* so add a dummy device for NAPI.
*/
init_dummy_netdev(&ctlb->dummy_dev);
atomic_set(&ctlb->napi_usr_refcnt, 0);
ctlb->is_napi_en = false;
for (i = 0; i < RXQ_NUM; i++) {
ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
NIC_NAPI_POLL_BUDGET);
}
}
static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
int i;
for (i = 0; i < RXQ_NUM; i++) {
netif_napi_del(ctlb->napi[i]);
ctlb->napi[i] = NULL;
}
}
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
struct netlink_ext_ack *extack)
{
struct t7xx_ccmni_ctrl *ctlb = ctxt;
struct t7xx_ccmni *ccmni;
int ret;
if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
return -EINVAL;
ccmni = wwan_netdev_drvpriv(dev);
ccmni->index = if_id;
ccmni->ctlb = ctlb;
ccmni->dev = dev;
atomic_set(&ccmni->usage, 0);
ctlb->ccmni_inst[if_id] = ccmni;
ret = register_netdevice(dev);
if (ret)
return ret;
netif_device_attach(dev);
return 0;
}
static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
struct t7xx_ccmni_ctrl *ctlb = ctxt;
u8 if_id = ccmni->index;
if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
return;
if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
return;
unregister_netdevice(dev);
}
static const struct wwan_ops ccmni_wwan_ops = {
.priv_size = sizeof(struct t7xx_ccmni),
.setup = t7xx_ccmni_wwan_setup,
.newlink = t7xx_ccmni_wwan_newlink,
.dellink = t7xx_ccmni_wwan_dellink,
};
static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
struct device *dev = ctlb->hif_ctrl->dev;
int ret;
if (ctlb->wwan_is_registered)
return 0;
/* WWAN core will create a netdev for the default IP MUX channel */
ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
if (ret < 0) {
dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
return ret;
}
ctlb->wwan_is_registered = true;
return 0;
}
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
struct t7xx_ccmni_ctrl *ctlb = para;
struct device *dev;
int ret = 0;
dev = ctlb->hif_ctrl->dev;
ctlb->md_sta = state;
switch (state) {
case MD_STATE_READY:
ret = t7xx_ccmni_register_wwan(ctlb);
if (!ret)
t7xx_ccmni_start(ctlb);
break;
case MD_STATE_EXCEPTION:
case MD_STATE_STOPPED:
t7xx_ccmni_pre_stop(ctlb);
ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
if (ret < 0)
dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
t7xx_ccmni_post_stop(ctlb);
break;
case MD_STATE_WAITING_FOR_HS1:
case MD_STATE_WAITING_TO_STOP:
ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
if (ret < 0)
dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
break;
default:
break;
}
return ret;
}
static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
struct t7xx_fsm_notifier *md_status_notifier;
md_status_notifier = &ctlb->md_status_notify;
INIT_LIST_HEAD(&md_status_notifier->entry);
md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
md_status_notifier->data = ctlb;
t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
struct napi_struct *napi)
{
struct t7xx_skb_cb *skb_cb;
struct net_device *net_dev;
struct t7xx_ccmni *ccmni;
int pkt_type, skb_len;
u8 netif_id;
skb_cb = T7XX_SKB_CB(skb);
netif_id = skb_cb->netif_idx;
ccmni = ccmni_ctlb->ccmni_inst[netif_id];
if (!ccmni) {
dev_kfree_skb(skb);
return;
}
net_dev = ccmni->dev;
pkt_type = skb_cb->rx_pkt_type;
skb->dev = net_dev;
if (pkt_type == PKT_TYPE_IP6)
skb->protocol = htons(ETH_P_IPV6);
else
skb->protocol = htons(ETH_P_IP);
skb_len = skb->len;
napi_gro_receive(napi, skb);
net_dev->stats.rx_packets++;
net_dev->stats.rx_bytes += skb_len;
}
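/* Wake the netdev TX queue for @qno if it was stopped; called when the DPMAIF
* layer signals DMPAIF_TXQ_STATE_IRQ after TX descriptors were released.
*/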
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
struct netdev_queue *net_queue;
if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
net_queue = netdev_get_tx_queue(ccmni->dev, qno);
if (netif_tx_queue_stopped(net_queue))
netif_tx_wake_queue(net_queue);
}
}
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
struct netdev_queue *net_queue;
if (atomic_read(&ccmni->usage) > 0) {
netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
net_queue = netdev_get_tx_queue(ccmni->dev, qno);
netif_tx_stop_queue(net_queue);
}
}
static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
enum dpmaif_txq_state state, int qno)
{
struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
if (ctlb->md_sta != MD_STATE_READY)
return;
if (!ctlb->ccmni_inst[0]) {
dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
return;
}
if (state == DMPAIF_TXQ_STATE_IRQ)
t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
else if (state == DMPAIF_TXQ_STATE_FULL)
t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct t7xx_ccmni_ctrl *ctlb;
ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
if (!ctlb)
return -ENOMEM;
t7xx_dev->ccmni_ctlb = ctlb;
ctlb->t7xx_dev = t7xx_dev;
ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
ctlb->nic_dev_num = NIC_DEV_DEFAULT;
ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
if (!ctlb->hif_ctrl)
return -ENOMEM;
t7xx_init_netdev_napi(ctlb);
init_md_status_notifier(t7xx_dev);
return 0;
}
void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
if (ctlb->wwan_is_registered) {
wwan_unregister_ops(&t7xx_dev->pdev->dev);
ctlb->wwan_is_registered = false;
}
t7xx_uninit_netdev_napi(ctlb);
t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_netdev.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Amir Hanania <[email protected]>
* Haijun Liu <[email protected]>
* Eliot Lee <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Chiranjeevi Rapolu <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"
#define DPMAIF_SKB_TX_BURST_CNT 5
#define DPMAIF_DRB_LIST_LEN 6144
/* DRB dtype */
#define DES_DTYP_PD 0
#define DES_DTYP_MSG 1
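/* Refresh the software copy of the DRB read index from HW and return how many
* DRBs the HW has consumed since the last update.
*/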
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
unsigned long flags;
if (!txq->que_started)
return 0;
old_sw_rd_idx = txq->drb_rd_idx;
new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
return 0;
}
if (old_sw_rd_idx <= new_hw_rd_idx)
drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
else
drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;
spin_lock_irqsave(&txq->tx_lock, flags);
txq->drb_rd_idx = new_hw_rd_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
return drb_cnt;
}
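/* Walk @release_cnt DRBs starting at the release read index, unmap payload
* buffers, free the skb once its last DRB (continue bit clear) is reached and
* replenish the TX budget, notifying the net layer when enough budget is back.
*/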
static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num, unsigned int release_cnt)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
struct dpmaif_drb *cur_drb, *drb_base;
unsigned int drb_cnt, i, cur_idx;
unsigned long flags;
drb_skb_base = txq->drb_skb_base;
drb_base = txq->drb_base;
spin_lock_irqsave(&txq->tx_lock, flags);
drb_cnt = txq->drb_size_cnt;
cur_idx = txq->drb_release_rd_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
for (i = 0; i < release_cnt; i++) {
cur_drb = drb_base + cur_idx;
if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
cur_drb_skb = drb_skb_base + cur_idx;
if (!cur_drb_skb->is_msg)
dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
cur_drb_skb->data_len, DMA_TO_DEVICE);
if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
if (!cur_drb_skb->skb) {
dev_err(dpmaif_ctrl->dev,
"txq%u: DRB check fail, invalid skb\n", q_num);
continue;
}
dev_kfree_skb_any(cur_drb_skb->skb);
}
cur_drb_skb->skb = NULL;
}
spin_lock_irqsave(&txq->tx_lock, flags);
cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
txq->drb_release_rd_idx = cur_idx;
spin_unlock_irqrestore(&txq->tx_lock, flags);
if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
}
if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);
return i;
}
static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
unsigned int q_num, unsigned int budget)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
unsigned int rel_cnt, real_rel_cnt;
/* Update read index from HW */
t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);
rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
txq->drb_rd_idx, DPMAIF_READ);
real_rel_cnt = min_not_zero(budget, rel_cnt);
if (real_rel_cnt)
real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);
return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}
static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}
static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
struct dpmaif_hw_info *hw_info;
int ret;
ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
if (ret < 0 && ret != -EACCES)
return;
/* The device may be in low power state. Disable sleep if needed */
t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
hw_info = &dpmaif_ctrl->hw_info;
ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
if (ret == -EAGAIN ||
(t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
t7xx_dpmaif_drb_ring_not_empty(txq))) {
queue_work(dpmaif_ctrl->txq[txq->index].worker,
&dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
/* Give the device time to enter the low power state */
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
} else {
t7xx_dpmaif_clr_ip_busy_sts(hw_info);
t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
}
}
t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
unsigned int channel_id)
{
struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
struct dpmaif_drb *drb = drb_base + cur_idx;
drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
FIELD_PREP(DRB_HDR_CONT, 1) |
FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
FIELD_PREP(DRB_MSG_L4_CHK, 1));
}
static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, dma_addr_t data_addr,
unsigned int pkt_size, bool last_one)
{
struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
struct dpmaif_drb *drb = drb_base + cur_idx;
u32 header;
header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
if (!last_one)
header |= FIELD_PREP(DRB_HDR_CONT, 1);
drb->header = cpu_to_le32(header);
drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}
static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
bool is_frag, bool is_last_one, dma_addr_t bus_addr,
unsigned int data_len)
{
struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;
drb_skb->skb = skb;
drb_skb->bus_addr = bus_addr;
drb_skb->data_len = data_len;
drb_skb->index = cur_idx;
drb_skb->is_msg = is_msg;
drb_skb->is_frag = is_frag;
drb_skb->is_last = is_last_one;
}
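/* Convert one skb into DRB entries: a message DRB carrying the channel ID and
* total length, followed by one payload DRB per DMA-mapped segment (linear
* data plus each page fragment). On mapping failure all segments mapped so
* far are unwound and the write index is restored.
*/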
static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
unsigned int wr_cnt, send_cnt, payload_cnt;
unsigned int cur_idx, drb_wr_idx_backup;
struct skb_shared_info *shinfo;
struct dpmaif_tx_queue *txq;
struct t7xx_skb_cb *skb_cb;
unsigned long flags;
skb_cb = T7XX_SKB_CB(skb);
txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
return -ENODEV;
atomic_set(&txq->tx_processing, 1);
/* Ensure tx_processing is set to 1 before the TX flow actually begins */
smp_mb();
shinfo = skb_shinfo(skb);
if (shinfo->frag_list)
dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");
payload_cnt = shinfo->nr_frags + 1;
/* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */
send_cnt = payload_cnt + 1;
spin_lock_irqsave(&txq->tx_lock, flags);
cur_idx = txq->drb_wr_idx;
drb_wr_idx_backup = cur_idx;
txq->drb_wr_idx += send_cnt;
if (txq->drb_wr_idx >= txq->drb_size_cnt)
txq->drb_wr_idx -= txq->drb_size_cnt;
t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
spin_unlock_irqrestore(&txq->tx_lock, flags);
for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
unsigned int data_len;
dma_addr_t bus_addr;
void *data_addr;
if (!wr_cnt) {
data_len = skb_headlen(skb);
data_addr = skb->data;
is_frag = false;
} else {
skb_frag_t *frag = shinfo->frags + wr_cnt - 1;
data_len = skb_frag_size(frag);
data_addr = skb_frag_address(frag);
is_frag = true;
}
bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
goto unmap_buffers;
cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);
spin_lock_irqsave(&txq->tx_lock, flags);
t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
is_last_one);
t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
is_last_one, bus_addr, data_len);
spin_unlock_irqrestore(&txq->tx_lock, flags);
}
if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);
atomic_set(&txq->tx_processing, 0);
return 0;
unmap_buffers:
while (wr_cnt--) {
struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;
cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
drb_skb += cur_idx;
dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
drb_skb->data_len, DMA_TO_DEVICE);
}
txq->drb_wr_idx = drb_wr_idx_backup;
atomic_set(&txq->tx_processing, 0);
return -ENOMEM;
}
static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
return false;
}
return true;
}
/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
struct dpmaif_tx_queue *txq;
txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
if (!txq->que_started)
return NULL;
return txq;
}
static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
txq->drb_wr_idx, DPMAIF_WRITE);
}
static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
/* Normal DRB (frags data + skb linear data) + msg DRB */
return skb_shinfo(skb)->nr_frags + 2;
}
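/* Move up to DPMAIF_SKB_TX_BURST_CNT queued skbs into the DRB ring; returns
* the number of DRBs written, 0 if nothing was sent, or a negative error code.
*/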
static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
unsigned int drb_remain_cnt, i;
unsigned int send_drb_cnt;
int drb_cnt = 0;
int ret = 0;
drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
struct sk_buff *skb;
skb = skb_peek(&txq->tx_skb_head);
if (!skb)
break;
send_drb_cnt = t7xx_skb_drb_cnt(skb);
if (drb_remain_cnt < send_drb_cnt) {
drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
continue;
}
drb_remain_cnt -= send_drb_cnt;
ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
if (ret < 0) {
dev_err(txq->dpmaif_ctrl->dev,
"Failed to add skb to device's ring: %d\n", ret);
break;
}
drb_cnt += send_drb_cnt;
skb_unlink(skb, &txq->tx_skb_head);
}
if (drb_cnt > 0)
return drb_cnt;
return ret;
}
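/* Drain the software TX lists: move queued skbs into the DRB ring in bursts
* and notify the HW of the number of DRB words added, until the lists are
* empty, the thread is asked to stop or the data path is powered off.
*/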
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
bool wait_disable_sleep = true;
do {
struct dpmaif_tx_queue *txq;
int drb_send_cnt;
txq = t7xx_select_tx_queue(dpmaif_ctrl);
if (!txq)
return;
drb_send_cnt = t7xx_txq_burst_send_skb(txq);
if (drb_send_cnt <= 0) {
usleep_range(10, 20);
cond_resched();
continue;
}
/* Wait for the PCIe resource to unlock */
if (wait_disable_sleep) {
if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
return;
wait_disable_sleep = false;
}
t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
cond_resched();
} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
(dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}
static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
struct dpmaif_ctrl *dpmaif_ctrl = arg;
int ret;
while (!kthread_should_stop()) {
if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
(!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
kthread_should_stop()))
continue;
if (kthread_should_stop())
break;
}
ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
if (ret < 0 && ret != -EACCES)
return ret;
t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
t7xx_do_tx_hw_push(dpmaif_ctrl);
t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
return 0;
}
int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
init_waitqueue_head(&dpmaif_ctrl->tx_wq);
dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
dpmaif_ctrl, "dpmaif_tx_hw_push");
return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}
void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
if (dpmaif_ctrl->tx_thread)
kthread_stop(dpmaif_ctrl->tx_thread);
}
/**
* t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
* @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
* @txq_number: Queue number to xmit on.
* @skb: Pointer to the skb to transmit.
*
* Add the skb to the queue of skbs to be transmitted.
* Wake up the thread that pushes the skbs from the queue to the HW.
*
* Return:
* * 0 - Success.
* * -EBUSY - Tx budget exhausted.
* In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
* state to prevent this error condition.
*/
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
struct sk_buff *skb)
{
struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
struct t7xx_skb_cb *skb_cb;
if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
return -EBUSY;
}
skb_cb = T7XX_SKB_CB(skb);
skb_cb->txq_number = txq_number;
skb_queue_tail(&txq->tx_skb_head, skb);
wake_up(&dpmaif_ctrl->tx_wq);
return 0;
}
void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
if (que_mask & BIT(i))
queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
}
}
static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
size_t brb_skb_size, brb_pd_size;
brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
/* For HW && AP SW */
txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
&txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
if (!txq->drb_base)
return -ENOMEM;
/* For AP SW to record the skb information */
txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
if (!txq->drb_skb_base) {
dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
txq->drb_base, txq->drb_bus_addr);
return -ENOMEM;
}
return 0;
}
static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
unsigned int i;
if (!drb_skb_base)
return;
for (i = 0; i < txq->drb_size_cnt; i++) {
drb_skb = drb_skb_base + i;
if (!drb_skb->skb)
continue;
if (!drb_skb->is_msg)
dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
drb_skb->data_len, DMA_TO_DEVICE);
if (drb_skb->is_last) {
dev_kfree_skb(drb_skb->skb);
drb_skb->skb = NULL;
}
}
}
static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
if (txq->drb_base)
dma_free_coherent(txq->dpmaif_ctrl->dev,
txq->drb_size_cnt * sizeof(struct dpmaif_drb),
txq->drb_base, txq->drb_bus_addr);
t7xx_dpmaif_tx_free_drb_skb(txq);
}
/**
* t7xx_dpmaif_txq_init() - Initialize TX queue.
* @txq: Pointer to struct dpmaif_tx_queue.
*
* Initialize the TX queue data structure and allocate memory for it to use.
*
* Return:
* * 0 - Success.
* * -ERROR - Error code from failure sub-initializations.
*/
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
int ret;
skb_queue_head_init(&txq->tx_skb_head);
init_waitqueue_head(&txq->req_wq);
atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);
ret = t7xx_dpmaif_tx_drb_buf_init(txq);
if (ret) {
dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
return ret;
}
txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
txq->index);
if (!txq->worker)
return -ENOMEM;
INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
spin_lock_init(&txq->tx_lock);
return 0;
}
void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
if (txq->worker)
destroy_workqueue(txq->worker);
skb_queue_purge(&txq->tx_skb_head);
t7xx_dpmaif_tx_drb_buf_rel(txq);
}
void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
struct dpmaif_tx_queue *txq;
int count = 0;
txq = &dpmaif_ctrl->txq[i];
txq->que_started = false;
/* Make sure TXQ is disabled */
smp_mb();
/* Wait for active Tx to be done */
while (atomic_read(&txq->tx_processing)) {
if (++count >= DPMAIF_MAX_CHECK_COUNT) {
dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
break;
}
}
}
}
static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
txq->que_started = false;
cancel_work_sync(&txq->dpmaif_tx_work);
flush_work(&txq->dpmaif_tx_work);
t7xx_dpmaif_tx_free_drb_skb(txq);
txq->drb_rd_idx = 0;
txq->drb_wr_idx = 0;
txq->drb_release_rd_idx = 0;
}
void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
int i;
for (i = 0; i < DPMAIF_TXQ_NUM; i++)
t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Ricardo Martinez <[email protected]>
* Moises Veleta <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Chiranjeevi Rapolu <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"
#define PORT_MSG_VERSION GENMASK(31, 16)
#define PORT_MSG_PRT_CNT GENMASK(15, 0)
struct port_msg {
__le32 head_pattern;
__le32 info;
__le32 tail_pattern;
__le32 data[];
};
static int port_ctl_send_msg_to_md(struct t7xx_port *port, unsigned int msg, unsigned int ex_msg)
{
struct sk_buff *skb;
int ret;
skb = t7xx_ctrl_alloc_skb(0);
if (!skb)
return -ENOMEM;
ret = t7xx_port_send_ctl_skb(port, skb, msg, ex_msg);
if (ret)
dev_kfree_skb_any(skb);
return ret;
}
static int fsm_ee_message_handler(struct t7xx_port *port, struct t7xx_fsm_ctl *ctl,
struct sk_buff *skb)
{
struct ctrl_msg_header *ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
enum md_state md_state;
int ret = -EINVAL;
md_state = t7xx_fsm_get_md_state(ctl);
if (md_state != MD_STATE_EXCEPTION) {
dev_err(dev, "Receive invalid MD_EX %x when MD state is %d\n",
ctrl_msg_h->ex_msg, md_state);
return -EINVAL;
}
switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
case CTL_ID_MD_EX:
if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ID) {
dev_err(dev, "Receive invalid MD_EX %x\n", ctrl_msg_h->ex_msg);
break;
}
ret = port_ctl_send_msg_to_md(port, CTL_ID_MD_EX, MD_EX_CHK_ID);
if (ret) {
dev_err(dev, "Failed to send exception message to modem\n");
break;
}
ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX, NULL, 0);
if (ret)
dev_err(dev, "Failed to append Modem Exception event");
break;
case CTL_ID_MD_EX_ACK:
if (le32_to_cpu(ctrl_msg_h->ex_msg) != MD_EX_CHK_ACK_ID) {
dev_err(dev, "Receive invalid MD_EX_ACK %x\n", ctrl_msg_h->ex_msg);
break;
}
ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_REC_OK, NULL, 0);
if (ret)
dev_err(dev, "Failed to append Modem Exception Received event");
break;
case CTL_ID_MD_EX_PASS:
ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_EX_PASS, NULL, 0);
if (ret)
dev_err(dev, "Failed to append Modem Exception Passed event");
break;
case CTL_ID_DRV_VER_ERROR:
dev_err(dev, "AP/MD driver version mismatch\n");
}
return ret;
}
/**
* t7xx_port_enum_msg_handler() - Parse the port enumeration message to create/remove nodes.
* @md: Modem context.
* @msg: Message.
*
* Used to control the creation and removal of device nodes.
*
* Return:
* * 0 - Success.
* * -EFAULT - Message check failure.
*/
int t7xx_port_enum_msg_handler(struct t7xx_modem *md, void *msg)
{
struct device *dev = &md->t7xx_dev->pdev->dev;
unsigned int version, port_count, i;
struct port_msg *port_msg = msg;
version = FIELD_GET(PORT_MSG_VERSION, le32_to_cpu(port_msg->info));
if (version != PORT_ENUM_VER ||
le32_to_cpu(port_msg->head_pattern) != PORT_ENUM_HEAD_PATTERN ||
le32_to_cpu(port_msg->tail_pattern) != PORT_ENUM_TAIL_PATTERN) {
dev_err(dev, "Invalid port control message %x:%x:%x\n",
version, le32_to_cpu(port_msg->head_pattern),
le32_to_cpu(port_msg->tail_pattern));
return -EFAULT;
}
port_count = FIELD_GET(PORT_MSG_PRT_CNT, le32_to_cpu(port_msg->info));
for (i = 0; i < port_count; i++) {
u32 port_info = le32_to_cpu(port_msg->data[i]);
unsigned int ch_id;
bool en_flag;
ch_id = FIELD_GET(PORT_INFO_CH_ID, port_info);
en_flag = port_info & PORT_INFO_ENFLG;
if (t7xx_port_proxy_chl_enable_disable(md->port_prox, ch_id, en_flag))
dev_dbg(dev, "Port:%x not found\n", ch_id);
}
return 0;
}
static int control_msg_handler(struct t7xx_port *port, struct sk_buff *skb)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
struct ctrl_msg_header *ctrl_msg_h;
int ret = 0;
ctrl_msg_h = (struct ctrl_msg_header *)skb->data;
switch (le32_to_cpu(ctrl_msg_h->ctrl_msg_id)) {
case CTL_ID_HS2_MSG:
skb_pull(skb, sizeof(*ctrl_msg_h));
if (port_conf->rx_ch == PORT_CH_CONTROL_RX ||
port_conf->rx_ch == PORT_CH_AP_CONTROL_RX) {
int event = port_conf->rx_ch == PORT_CH_CONTROL_RX ?
FSM_EVENT_MD_HS2 : FSM_EVENT_AP_HS2;
ret = t7xx_fsm_append_event(ctl, event, skb->data,
le32_to_cpu(ctrl_msg_h->data_length));
if (ret)
dev_err(port->dev, "Failed to append Handshake 2 event");
}
dev_kfree_skb_any(skb);
break;
case CTL_ID_MD_EX:
case CTL_ID_MD_EX_ACK:
case CTL_ID_MD_EX_PASS:
case CTL_ID_DRV_VER_ERROR:
ret = fsm_ee_message_handler(port, ctl, skb);
dev_kfree_skb_any(skb);
break;
case CTL_ID_PORT_ENUM:
skb_pull(skb, sizeof(*ctrl_msg_h));
ret = t7xx_port_enum_msg_handler(ctl->md, (struct port_msg *)skb->data);
if (!ret)
ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM, 0);
else
ret = port_ctl_send_msg_to_md(port, CTL_ID_PORT_ENUM,
PORT_ENUM_VER_MISMATCH);
break;
default:
ret = -EINVAL;
dev_err(port->dev, "Unknown control message ID to FSM %x\n",
le32_to_cpu(ctrl_msg_h->ctrl_msg_id));
break;
}
if (ret)
dev_err(port->dev, "%s control message handle error: %d\n", port_conf->name, ret);
return ret;
}
static int port_ctl_rx_thread(void *arg)
{
while (!kthread_should_stop()) {
struct t7xx_port *port = arg;
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&port->rx_wq.lock, flags);
if (skb_queue_empty(&port->rx_skb_list) &&
wait_event_interruptible_locked_irq(port->rx_wq,
!skb_queue_empty(&port->rx_skb_list) ||
kthread_should_stop())) {
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
continue;
}
if (kthread_should_stop()) {
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
break;
}
skb = __skb_dequeue(&port->rx_skb_list);
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
control_msg_handler(port, skb);
}
return 0;
}
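/* Note on the wait pattern above: wait_event_interruptible_locked_irq() is
 * entered and exited with rx_wq.lock held (the lock is dropped only while
 * sleeping) and returns non-zero only when the sleep was interrupted by a
 * signal, in which case the thread unlocks and retries.  A zero return means
 * the condition (a queued skb or a pending kthread_stop()) became true, so
 * the dequeue below runs under the same lock acquisition.
 */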
static int port_ctl_init(struct t7xx_port *port)
{
const struct t7xx_port_conf *port_conf = port->port_conf;
port->thread = kthread_run(port_ctl_rx_thread, port, "%s", port_conf->name);
if (IS_ERR(port->thread)) {
dev_err(port->dev, "Failed to start port control thread\n");
return PTR_ERR(port->thread);
}
port->rx_length_th = CTRL_QUEUE_MAXLEN;
return 0;
}
static void port_ctl_uninit(struct t7xx_port *port)
{
unsigned long flags;
struct sk_buff *skb;
if (port->thread)
kthread_stop(port->thread);
spin_lock_irqsave(&port->rx_wq.lock, flags);
port->rx_length_th = 0;
while ((skb = __skb_dequeue(&port->rx_skb_list)) != NULL)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&port->rx_wq.lock, flags);
}
struct port_ops ctl_port_ops = {
.init = port_ctl_init,
.recv_skb = t7xx_port_enqueue_skb,
.uninit = port_ctl_uninit,
};
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, MediaTek Inc.
* Copyright (c) 2021-2022, Intel Corporation.
*
* Authors:
* Haijun Liu <[email protected]>
* Moises Veleta <[email protected]>
* Ricardo Martinez <[email protected]>
*
* Contributors:
* Amir Hanania <[email protected]>
* Andy Shevchenko <[email protected]>
* Eliot Lee <[email protected]>
* Sreehari Kancharla <[email protected]>
*/
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>
#include "t7xx_cldma.h"
#define ADDR_SIZE 8
void t7xx_cldma_clear_ip_busy(struct t7xx_cldma_hw *hw_info)
{
u32 val;
val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
val |= IP_BUSY_WAKEUP;
iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_IP_BUSY);
}
/**
* t7xx_cldma_hw_restore() - Restore CLDMA HW registers.
* @hw_info: Pointer to struct t7xx_cldma_hw.
*
* Restore HW after resume. Writes uplink configuration for CLDMA HW.
*/
void t7xx_cldma_hw_restore(struct t7xx_cldma_hw *hw_info)
{
u32 ul_cfg;
ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
if (hw_info->hw_mode == MODE_BIT_64)
ul_cfg |= UL_CFG_BIT_MODE_64;
else if (hw_info->hw_mode == MODE_BIT_40)
ul_cfg |= UL_CFG_BIT_MODE_40;
else if (hw_info->hw_mode == MODE_BIT_36)
ul_cfg |= UL_CFG_BIT_MODE_36;
iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
/* Disable TX and RX invalid address check */
iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}
void t7xx_cldma_hw_start_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_START_CMD :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_CMD;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_start(struct t7xx_cldma_hw *hw_info)
{
/* Enable the TX & RX interrupts */
iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
iowrite32(TXRX_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
/* Enable the empty queue interrupt */
iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0);
iowrite32(EMPTY_STATUS_BITMASK, hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0);
}
void t7xx_cldma_hw_reset(void __iomem *ao_base)
{
u32 val;
val = ioread32(ao_base + REG_INFRA_RST2_SET);
val |= RST2_PMIC_SW_RST_SET;
iowrite32(val, ao_base + REG_INFRA_RST2_SET);
val = ioread32(ao_base + REG_INFRA_RST4_SET);
val |= RST4_CLDMA1_SW_RST_SET;
iowrite32(val, ao_base + REG_INFRA_RST4_SET);
udelay(1);
val = ioread32(ao_base + REG_INFRA_RST4_CLR);
val |= RST4_CLDMA1_SW_RST_CLR;
iowrite32(val, ao_base + REG_INFRA_RST4_CLR);
val = ioread32(ao_base + REG_INFRA_RST2_CLR);
val |= RST2_PMIC_SW_RST_CLR;
iowrite32(val, ao_base + REG_INFRA_RST2_CLR);
}
bool t7xx_cldma_tx_addr_is_set(struct t7xx_cldma_hw *hw_info, unsigned int qno)
{
u32 offset = REG_CLDMA_UL_START_ADDRL_0 + qno * ADDR_SIZE;
return ioread64(hw_info->ap_pdn_base + offset);
}
void t7xx_cldma_hw_set_start_addr(struct t7xx_cldma_hw *hw_info, unsigned int qno, u64 address,
enum mtk_txrx tx_rx)
{
u32 offset = qno * ADDR_SIZE;
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_START_ADDRL_0 :
hw_info->ap_pdn_base + REG_CLDMA_UL_START_ADDRL_0;
iowrite64(address, reg + offset);
}
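/* The per-queue start-address registers are 64-bit and packed ADDR_SIZE (8)
 * bytes apart, so queue n is programmed at REG_CLDMA_*_START_ADDRL_0 + n * 8
 * with a single 64-bit MMIO store.  Illustrative call (the queue number and
 * the gpd_dma_addr variable are placeholders, not taken from this file):
 *
 *	t7xx_cldma_hw_set_start_addr(hw_info, 2, gpd_dma_addr, MTK_TX);
 */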
void t7xx_cldma_hw_resume_queue(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *base = hw_info->ap_pdn_base;
if (tx_rx == MTK_RX)
iowrite32(BIT(qno), base + REG_CLDMA_DL_RESUME_CMD);
else
iowrite32(BIT(qno), base + REG_CLDMA_UL_RESUME_CMD);
}
unsigned int t7xx_cldma_hw_queue_status(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 mask, val;
mask = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_DL_STATUS :
hw_info->ap_pdn_base + REG_CLDMA_UL_STATUS;
val = ioread32(reg);
return val & mask;
}
void t7xx_cldma_hw_tx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
unsigned int ch_id;
ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
ch_id &= bitmask;
/* Clear the ch IDs in the TX interrupt status register */
iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
}
void t7xx_cldma_hw_rx_done(struct t7xx_cldma_hw *hw_info, unsigned int bitmask)
{
unsigned int ch_id;
ch_id = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
ch_id &= bitmask;
/* Clear the ch IDs in the RX interrupt status register */
iowrite32(ch_id, hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
}
unsigned int t7xx_cldma_hw_int_status(struct t7xx_cldma_hw *hw_info, unsigned int bitmask,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0;
val = ioread32(reg);
return val & bitmask;
}
void t7xx_cldma_hw_irq_dis_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_irq_dis_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}
void t7xx_cldma_hw_irq_en_txrx(struct t7xx_cldma_hw *hw_info, unsigned int qno,
enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val, reg);
}
void t7xx_cldma_hw_irq_en_eq(struct t7xx_cldma_hw *hw_info, unsigned int qno, enum mtk_txrx tx_rx)
{
void __iomem *reg;
u32 val;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMCR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMCR0;
val = qno == CLDMA_ALL_Q ? CLDMA_ALL_Q : BIT(qno);
iowrite32(val << EQ_STA_BIT_OFFSET, reg);
}
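/* The L2 interrupt mask-set/clear registers carry two bit groups: the low
 * bits gate the per-queue transfer-done interrupts, and the same bitmap
 * shifted left by EQ_STA_BIT_OFFSET gates the corresponding empty-queue
 * interrupts, which is why the *_eq helpers above shift 'val' before the
 * write.
 */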
/**
* t7xx_cldma_hw_init() - Initialize CLDMA HW.
* @hw_info: Pointer to struct t7xx_cldma_hw.
*
* Write uplink and downlink configuration to CLDMA HW.
*/
void t7xx_cldma_hw_init(struct t7xx_cldma_hw *hw_info)
{
u32 ul_cfg, dl_cfg;
ul_cfg = ioread32(hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
dl_cfg = ioread32(hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
/* Configure the DRAM address mode */
ul_cfg &= ~UL_CFG_BIT_MODE_MASK;
dl_cfg &= ~DL_CFG_BIT_MODE_MASK;
if (hw_info->hw_mode == MODE_BIT_64) {
ul_cfg |= UL_CFG_BIT_MODE_64;
dl_cfg |= DL_CFG_BIT_MODE_64;
} else if (hw_info->hw_mode == MODE_BIT_40) {
ul_cfg |= UL_CFG_BIT_MODE_40;
dl_cfg |= DL_CFG_BIT_MODE_40;
} else if (hw_info->hw_mode == MODE_BIT_36) {
ul_cfg |= UL_CFG_BIT_MODE_36;
dl_cfg |= DL_CFG_BIT_MODE_36;
}
iowrite32(ul_cfg, hw_info->ap_pdn_base + REG_CLDMA_UL_CFG);
dl_cfg |= DL_CFG_UP_HW_LAST;
iowrite32(dl_cfg, hw_info->ap_ao_base + REG_CLDMA_DL_CFG);
iowrite32(0, hw_info->ap_ao_base + REG_CLDMA_INT_MASK);
iowrite32(BUSY_MASK_MD, hw_info->ap_ao_base + REG_CLDMA_BUSY_MASK);
iowrite32(UL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_UL_MEM);
iowrite32(DL_MEM_CHECK_DIS, hw_info->ap_pdn_base + REG_CLDMA_DL_MEM);
}
void t7xx_cldma_hw_stop_all_qs(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_pdn_base + REG_CLDMA_DL_STOP_CMD :
hw_info->ap_pdn_base + REG_CLDMA_UL_STOP_CMD;
iowrite32(CLDMA_ALL_Q, reg);
}
void t7xx_cldma_hw_stop(struct t7xx_cldma_hw *hw_info, enum mtk_txrx tx_rx)
{
void __iomem *reg;
reg = tx_rx == MTK_RX ? hw_info->ap_ao_base + REG_CLDMA_L2RIMSR0 :
hw_info->ap_pdn_base + REG_CLDMA_L2TIMSR0;
iowrite32(TXRX_STATUS_BITMASK, reg);
iowrite32(EMPTY_STATUS_BITMASK, reg);
}
|
linux-master
|
drivers/net/wwan/t7xx/t7xx_cldma.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* FUJITSU Extended Socket Network Device driver
* Copyright (c) 2015-2016 FUJITSU LIMITED
*/
#include <linux/module.h>
#ifndef __CHECKER__
#include "fjes_hw.h"
#define CREATE_TRACE_POINTS
#include "fjes_trace.h"
#endif /* __CHECKER__ */
|
linux-master
|
drivers/net/fjes/fjes_trace.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* FUJITSU Extended Socket Network Device driver
* Copyright (c) 2015 FUJITSU LIMITED
*/
#include "fjes_hw.h"
#include "fjes.h"
#include "fjes_trace.h"
static void fjes_hw_update_zone_task(struct work_struct *);
static void fjes_hw_epstop_task(struct work_struct *);
/* supported MTU list */
const u32 fjes_support_mtu[] = {
FJES_MTU_DEFINE(8 * 1024),
FJES_MTU_DEFINE(16 * 1024),
FJES_MTU_DEFINE(32 * 1024),
FJES_MTU_DEFINE(64 * 1024),
0
};
u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
u8 *base = hw->base;
u32 value = 0;
value = readl(&base[reg]);
return value;
}
static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
u8 *base;
if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
fjes_driver_name)) {
pr_err("request_mem_region failed\n");
return NULL;
}
base = (u8 *)ioremap(hw->hw_res.start, hw->hw_res.size);
return base;
}
static void fjes_hw_iounmap(struct fjes_hw *hw)
{
iounmap(hw->base);
release_mem_region(hw->hw_res.start, hw->hw_res.size);
}
int fjes_hw_reset(struct fjes_hw *hw)
{
union REG_DCTL dctl;
int timeout;
dctl.reg = 0;
dctl.bits.reset = 1;
wr32(XSCT_DCTL, dctl.reg);
timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
dctl.reg = rd32(XSCT_DCTL);
while ((dctl.bits.reset == 1) && (timeout > 0)) {
msleep(1000);
dctl.reg = rd32(XSCT_DCTL);
timeout -= 1000;
}
return timeout > 0 ? 0 : -EIO;
}
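/* fjes_hw_reset() kicks the device by setting DCTL.reset and then polls the
 * same register once a second until the hardware clears the bit or
 * FJES_DEVICE_RESET_TIMEOUT seconds elapse; callers get 0 on success and
 * -EIO on timeout.
 */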
static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
union REG_MAX_EP info;
info.reg = rd32(XSCT_MAX_EP);
return info.bits.maxep;
}
static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
union REG_OWNER_EPID info;
info.reg = rd32(XSCT_OWNER_EPID);
return info.bits.epid;
}
static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
size_t size;
size = sizeof(struct fjes_device_shared_info) +
(sizeof(u8) * hw->max_epid);
hw->hw_info.share = kzalloc(size, GFP_KERNEL);
if (!hw->hw_info.share)
return -ENOMEM;
hw->hw_info.share->epnum = hw->max_epid;
return 0;
}
static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
kfree(hw->hw_info.share);
hw->hw_info.share = NULL;
}
static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
void *mem;
mem = vzalloc(EP_BUFFER_SIZE);
if (!mem)
return -ENOMEM;
epbh->buffer = mem;
epbh->size = EP_BUFFER_SIZE;
epbh->info = (union ep_buffer_info *)mem;
epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
return 0;
}
static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
vfree(epbh->buffer);
epbh->buffer = NULL;
epbh->size = 0;
epbh->info = NULL;
epbh->ring = NULL;
}
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, const u8 *mac_addr,
u32 mtu)
{
union ep_buffer_info *info = epbh->info;
u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
int i;
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
vlan_id[i] = info->v1i.vlan_id[i];
memset(info, 0, sizeof(union ep_buffer_info));
info->v1i.version = 0; /* version 0 */
for (i = 0; i < ETH_ALEN; i++)
info->v1i.mac_addr[i] = mac_addr[i];
info->v1i.head = 0;
info->v1i.tail = 1;
info->v1i.info_size = sizeof(union ep_buffer_info);
info->v1i.buffer_size = epbh->size - info->v1i.info_size;
info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
info->v1i.count_max =
EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
info->v1i.vlan_id[i] = vlan_id[i];
info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}
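/* Ring geometry set up above: the ep_buffer_info header occupies the start of
 * the endpoint buffer, buffer_size is the remainder, and count_max is the
 * number of frame_max-sized slots that fit into it
 * (EP_RING_NUM(buffer_size, frame_max)).  With head/tail initialized to 0/1,
 * the first transmitted frame lands in slot 0 (written at tail - 1 by
 * fjes_hw_epbuf_tx_pkt_send()) and is consumed from slot 0 (read at head by
 * the RX path).
 */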
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
struct fjes_device_command_param *param)
{
/* Request Buffer length */
wr32(XSCT_REQBL, (__le32)(param->req_len));
/* Response Buffer Length */
wr32(XSCT_RESPBL, (__le32)(param->res_len));
/* Request Buffer Address */
wr32(XSCT_REQBAL,
(__le32)(param->req_start & GENMASK_ULL(31, 0)));
wr32(XSCT_REQBAH,
(__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
/* Response Buffer Address */
wr32(XSCT_RESPBAL,
(__le32)(param->res_start & GENMASK_ULL(31, 0)));
wr32(XSCT_RESPBAH,
(__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
/* Share status address */
wr32(XSCT_SHSTSAL,
(__le32)(param->share_start & GENMASK_ULL(31, 0)));
wr32(XSCT_SHSTSAH,
(__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
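/* The request, response and shared-status buffers are contiguous allocations
 * whose 64-bit physical addresses are split across a low/high register pair
 * each, e.g. for the request buffer (req_start stands for param->req_start):
 *
 *	wr32(XSCT_REQBAL, (__le32)(req_start & GENMASK_ULL(31, 0)));
 *	wr32(XSCT_REQBAH, (__le32)((req_start & GENMASK_ULL(63, 32)) >> 32));
 */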
static int fjes_hw_setup(struct fjes_hw *hw)
{
u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
struct fjes_device_command_param param;
struct ep_share_mem_info *buf_pair;
unsigned long flags;
size_t mem_size;
int result;
int epidx;
void *buf;
hw->hw_info.max_epid = &hw->max_epid;
hw->hw_info.my_epid = &hw->my_epid;
buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
GFP_KERNEL);
if (!buf)
return -ENOMEM;
hw->ep_shm_info = (struct ep_share_mem_info *)buf;
mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.req_buf))
return -ENOMEM;
hw->hw_info.req_buf_size = mem_size;
mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
if (!(hw->hw_info.res_buf))
return -ENOMEM;
hw->hw_info.res_buf_size = mem_size;
result = fjes_hw_alloc_shared_status_region(hw);
if (result)
return result;
hw->hw_info.buffer_share_bit = 0;
hw->hw_info.buffer_unshare_reserve_bit = 0;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx != hw->my_epid) {
buf_pair = &hw->ep_shm_info[epidx];
result = fjes_hw_alloc_epbuf(&buf_pair->tx);
if (result)
return result;
result = fjes_hw_alloc_epbuf(&buf_pair->rx);
if (result)
return result;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
fjes_support_mtu[0]);
fjes_hw_setup_epbuf(&buf_pair->rx, mac,
fjes_support_mtu[0]);
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
memset(&param, 0, sizeof(param));
param.req_len = hw->hw_info.req_buf_size;
param.req_start = __pa(hw->hw_info.req_buf);
param.res_len = hw->hw_info.res_buf_size;
param.res_start = __pa(hw->hw_info.res_buf);
param.share_start = __pa(hw->hw_info.share->ep_status);
fjes_hw_init_command_registers(hw, &param);
return 0;
}
static void fjes_hw_cleanup(struct fjes_hw *hw)
{
int epidx;
if (!hw->ep_shm_info)
return;
fjes_hw_free_shared_status_region(hw);
kfree(hw->hw_info.req_buf);
hw->hw_info.req_buf = NULL;
kfree(hw->hw_info.res_buf);
hw->hw_info.res_buf = NULL;
for (epidx = 0; epidx < hw->max_epid ; epidx++) {
if (epidx == hw->my_epid)
continue;
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
}
kfree(hw->ep_shm_info);
hw->ep_shm_info = NULL;
}
int fjes_hw_init(struct fjes_hw *hw)
{
int ret;
hw->base = fjes_hw_iomap(hw);
if (!hw->base)
return -EIO;
ret = fjes_hw_reset(hw);
if (ret)
return ret;
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
mutex_init(&hw->hw_info.lock);
spin_lock_init(&hw->rx_status_lock);
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
return -ENXIO;
ret = fjes_hw_setup(hw);
hw->hw_info.trace = vzalloc(FJES_DEBUG_BUFFER_SIZE);
hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
return ret;
}
void fjes_hw_exit(struct fjes_hw *hw)
{
int ret;
if (hw->base) {
if (hw->debug_mode) {
/* disable debug mode */
mutex_lock(&hw->hw_info.lock);
fjes_hw_stop_debug(hw);
mutex_unlock(&hw->hw_info.lock);
}
vfree(hw->hw_info.trace);
hw->hw_info.trace = NULL;
hw->hw_info.trace_size = 0;
hw->debug_mode = 0;
ret = fjes_hw_reset(hw);
if (ret)
pr_err("%s: reset error", __func__);
fjes_hw_iounmap(hw);
hw->base = NULL;
}
fjes_hw_cleanup(hw);
cancel_work_sync(&hw->update_zone_task);
cancel_work_sync(&hw->epstop_task);
}
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
enum fjes_dev_command_request_type type)
{
enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
union REG_CR cr;
union REG_CS cs;
int timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
cr.reg = 0;
cr.bits.req_start = 1;
cr.bits.req_code = type;
wr32(XSCT_CR, cr.reg);
cr.reg = rd32(XSCT_CR);
if (cr.bits.error == 0) {
timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
cs.reg = rd32(XSCT_CS);
while ((cs.bits.complete != 1) && timeout > 0) {
msleep(1000);
cs.reg = rd32(XSCT_CS);
timeout -= 1000;
}
if (cs.bits.complete == 1)
ret = FJES_CMD_STATUS_NORMAL;
else if (timeout <= 0)
ret = FJES_CMD_STATUS_TIMEOUT;
} else {
switch (cr.bits.err_info) {
case FJES_CMD_REQ_ERR_INFO_PARAM:
ret = FJES_CMD_STATUS_ERROR_PARAM;
break;
case FJES_CMD_REQ_ERR_INFO_STATUS:
ret = FJES_CMD_STATUS_ERROR_STATUS;
break;
default:
ret = FJES_CMD_STATUS_UNKNOWN;
break;
}
}
trace_fjes_hw_issue_request_command(&cr, &cs, timeout, ret);
return ret;
}
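/* Command flow used by the request helpers below: the request type is latched
 * into CR with req_start set, then CS.complete is polled once a second for up
 * to FJES_COMMAND_REQ_TIMEOUT seconds.  A CR.error indication short-circuits
 * the poll and is translated to FJES_CMD_STATUS_ERROR_PARAM, _ERROR_STATUS or
 * _UNKNOWN instead.
 */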
int fjes_hw_request_info(struct fjes_hw *hw)
{
union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
enum fjes_dev_command_response_e ret;
int result;
memset(req_buf, 0, hw->hw_info.req_buf_size);
memset(res_buf, 0, hw->hw_info.res_buf_size);
req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
res_buf->info.length = 0;
res_buf->info.code = 0;
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
trace_fjes_hw_request_info(hw, res_buf);
result = 0;
if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
res_buf->info.length) {
trace_fjes_hw_request_info_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->info.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
break;
default:
result = -EPERM;
break;
}
} else {
switch (ret) {
case FJES_CMD_STATUS_UNKNOWN:
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
trace_fjes_hw_request_info_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
result = -EPERM;
break;
case FJES_CMD_STATUS_ERROR_STATUS:
result = -EPERM;
break;
default:
result = -EPERM;
break;
}
}
return result;
}
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
struct ep_share_mem_info *buf_pair)
{
union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
enum fjes_dev_command_response_e ret;
int page_count;
int timeout;
int i, idx;
void *addr;
int result;
if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
return 0;
memset(req_buf, 0, hw->hw_info.req_buf_size);
memset(res_buf, 0, hw->hw_info.res_buf_size);
req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
buf_pair->tx.size,
buf_pair->rx.size);
req_buf->share_buffer.epid = dest_epid;
idx = 0;
req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
for (i = 0; i < page_count; i++) {
addr = ((u8 *)(buf_pair->tx.buffer)) +
(i * EP_BUFFER_INFO_SIZE);
req_buf->share_buffer.buffer[idx++] =
(__le64)(page_to_phys(vmalloc_to_page(addr)) +
offset_in_page(addr));
}
req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
for (i = 0; i < page_count; i++) {
addr = ((u8 *)(buf_pair->rx.buffer)) +
(i * EP_BUFFER_INFO_SIZE);
req_buf->share_buffer.buffer[idx++] =
(__le64)(page_to_phys(vmalloc_to_page(addr)) +
offset_in_page(addr));
}
res_buf->share_buffer.length = 0;
res_buf->share_buffer.code = 0;
trace_fjes_hw_register_buff_addr_req(req_buf, buf_pair);
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
while ((ret == FJES_CMD_STATUS_NORMAL) &&
(res_buf->share_buffer.length ==
FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
(res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
(timeout > 0)) {
msleep(200 + hw->my_epid * 20);
timeout -= (200 + hw->my_epid * 20);
res_buf->share_buffer.length = 0;
res_buf->share_buffer.code = 0;
ret = fjes_hw_issue_request_command(
hw, FJES_CMD_REQ_SHARE_BUFFER);
}
result = 0;
trace_fjes_hw_register_buff_addr(res_buf, timeout);
if (res_buf->share_buffer.length !=
FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) {
trace_fjes_hw_register_buff_addr_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->share_buffer.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
break;
case FJES_CMD_REQ_RES_CODE_BUSY:
trace_fjes_hw_register_buff_addr_err("Busy Timeout");
result = -EBUSY;
break;
default:
result = -EPERM;
break;
}
} else {
switch (ret) {
case FJES_CMD_STATUS_UNKNOWN:
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
trace_fjes_hw_register_buff_addr_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
case FJES_CMD_STATUS_ERROR_STATUS:
default:
result = -EPERM;
break;
}
}
return result;
}
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
struct fjes_device_shared_info *share = hw->hw_info.share;
enum fjes_dev_command_response_e ret;
int timeout;
int result;
if (!hw->base)
return -EPERM;
if (!req_buf || !res_buf || !share)
return -EPERM;
if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
return 0;
memset(req_buf, 0, hw->hw_info.req_buf_size);
memset(res_buf, 0, hw->hw_info.res_buf_size);
req_buf->unshare_buffer.length =
FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
req_buf->unshare_buffer.epid = dest_epid;
res_buf->unshare_buffer.length = 0;
res_buf->unshare_buffer.code = 0;
trace_fjes_hw_unregister_buff_addr_req(req_buf);
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
while ((ret == FJES_CMD_STATUS_NORMAL) &&
(res_buf->unshare_buffer.length ==
FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
(res_buf->unshare_buffer.code ==
FJES_CMD_REQ_RES_CODE_BUSY) &&
(timeout > 0)) {
msleep(200 + hw->my_epid * 20);
timeout -= (200 + hw->my_epid * 20);
res_buf->unshare_buffer.length = 0;
res_buf->unshare_buffer.code = 0;
ret =
fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
}
result = 0;
trace_fjes_hw_unregister_buff_addr(res_buf, timeout);
if (res_buf->unshare_buffer.length !=
FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
trace_fjes_hw_unregister_buff_addr_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->unshare_buffer.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
break;
case FJES_CMD_REQ_RES_CODE_BUSY:
trace_fjes_hw_unregister_buff_addr_err("Busy Timeout");
result = -EBUSY;
break;
default:
result = -EPERM;
break;
}
} else {
switch (ret) {
case FJES_CMD_STATUS_UNKNOWN:
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
trace_fjes_hw_unregister_buff_addr_err("Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
case FJES_CMD_STATUS_ERROR_STATUS:
default:
result = -EPERM;
break;
}
}
return result;
}
int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
enum REG_ICTL_MASK mask)
{
u32 ig = mask | dest_epid;
wr32(XSCT_IG, cpu_to_le32(ig));
return 0;
}
u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
u32 cur_is;
cur_is = rd32(XSCT_IS);
return cur_is;
}
void fjes_hw_set_irqmask(struct fjes_hw *hw,
enum REG_ICTL_MASK intr_mask, bool mask)
{
if (mask)
wr32(XSCT_IMS, intr_mask);
else
wr32(XSCT_IMC, intr_mask);
}
bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
if (epid >= hw->max_epid)
return false;
if ((hw->ep_shm_info[epid].es_status !=
FJES_ZONING_STATUS_ENABLE) ||
(hw->ep_shm_info[hw->my_epid].zone ==
FJES_ZONING_ZONE_TYPE_NONE))
return false;
else
return (hw->ep_shm_info[epid].zone ==
hw->ep_shm_info[hw->my_epid].zone);
}
int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
int dest_epid)
{
int value = false;
if (dest_epid < share->epnum)
value = share->ep_status[dest_epid];
return value;
}
static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
return test_bit(src_epid, &hw->txrx_stop_req_bit);
}
static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
FJES_RX_STOP_REQ_DONE);
}
enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
enum ep_partner_status status;
if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
if (fjes_hw_epid_is_stop_requested(hw, epid)) {
status = EP_PARTNER_WAITING;
} else {
if (fjes_hw_epid_is_stop_process_done(hw, epid))
status = EP_PARTNER_COMPLETE;
else
status = EP_PARTNER_SHARED;
}
} else {
status = EP_PARTNER_UNSHARE;
}
return status;
}
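/* Partner status, as derived above: an endpoint not marked shared in the
 * shared-status region is EP_PARTNER_UNSHARE; a shared endpoint is
 * EP_PARTNER_WAITING while we still have a TX/RX stop request outstanding,
 * EP_PARTNER_COMPLETE once the peer has flagged the stop as done, and
 * EP_PARTNER_SHARED otherwise.
 */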
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
enum ep_partner_status status;
unsigned long flags;
int epidx;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
status = fjes_hw_get_partner_ep_status(hw, epidx);
switch (status) {
case EP_PARTNER_SHARED:
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_TXRX_STOP_REQ);
hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
break;
default:
break;
}
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
set_bit(epidx, &hw->txrx_stop_req_bit);
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
enum ep_partner_status status;
union ep_buffer_info *info;
int wait_time = 0;
int epidx;
while (hw->hw_info.buffer_unshare_reserve_bit &&
(wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
status = fjes_hw_epid_is_shared(hw->hw_info.share,
epidx);
info = hw->ep_shm_info[epidx].rx.info;
if ((!status ||
(info->v1i.rx_status &
FJES_RX_STOP_REQ_DONE)) &&
test_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit)) {
clear_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit);
}
}
msleep(100);
wait_time += 100;
}
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
clear_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit);
}
return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
? 0 : -EBUSY;
}
bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
{
union ep_buffer_info *info = epbh->info;
return (info->common.version == version);
}
bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
union ep_buffer_info *info = epbh->info;
return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}
bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
union ep_buffer_info *info = epbh->info;
bool ret = false;
int i;
if (vlan_id == 0) {
ret = true;
} else {
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
if (vlan_id == info->v1i.vlan_id[i]) {
ret = true;
break;
}
}
}
return ret;
}
bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
union ep_buffer_info *info = epbh->info;
int i;
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
if (info->v1i.vlan_id[i] == 0) {
info->v1i.vlan_id[i] = vlan_id;
return true;
}
}
return false;
}
void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
{
union ep_buffer_info *info = epbh->info;
int i;
if (0 != vlan_id) {
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
if (vlan_id == info->v1i.vlan_id[i])
info->v1i.vlan_id[i] = 0;
}
}
}
bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
union ep_buffer_info *info = epbh->info;
if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
return true;
if (info->v1i.count_max == 0)
return true;
return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
info->v1i.count_max);
}
void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
size_t *psize)
{
union ep_buffer_info *info = epbh->info;
struct esmem_frame *ring_frame;
void *frame;
ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
(info->v1i.head,
info->v1i.count_max) *
info->v1i.frame_max]);
*psize = (size_t)ring_frame->frame_size;
frame = ring_frame->frame_data;
return frame;
}
void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
{
union ep_buffer_info *info = epbh->info;
if (fjes_hw_epbuf_rx_is_empty(epbh))
return;
EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
}
int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
void *frame, size_t size)
{
union ep_buffer_info *info = epbh->info;
struct esmem_frame *ring_frame;
if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
return -ENOBUFS;
ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
(info->v1i.tail - 1,
info->v1i.count_max) *
info->v1i.frame_max]);
ring_frame->frame_size = size;
memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
return 0;
}
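/* Producer/consumer convention for the shared ring: the sender writes a frame
 * into slot EP_RING_INDEX(tail - 1, count_max) and then advances tail, while
 * the receiver reads the frame at EP_RING_INDEX(head, count_max) and advances
 * head when it drops the packet.  EP_RING_FULL() and EP_RING_EMPTY() gate the
 * send and receive paths on that head/tail pair.
 */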
static void fjes_hw_update_zone_task(struct work_struct *work)
{
struct fjes_hw *hw = container_of(work,
struct fjes_hw, update_zone_task);
struct my_s {u8 es_status; u8 zone; } *info;
union fjes_device_command_res *res_buf;
enum ep_partner_status pstatus;
struct fjes_adapter *adapter;
struct net_device *netdev;
unsigned long flags;
ulong unshare_bit = 0;
ulong share_bit = 0;
ulong irq_bit = 0;
int epidx;
int ret;
adapter = (struct fjes_adapter *)hw->back;
netdev = adapter->netdev;
res_buf = hw->hw_info.res_buf;
info = (struct my_s *)&res_buf->info.info;
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_request_info(hw);
switch (ret) {
case -ENOMSG:
case -EBUSY:
default:
if (!work_pending(&adapter->force_close_task)) {
adapter->force_reset = true;
schedule_work(&adapter->force_close_task);
}
break;
case 0:
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid) {
hw->ep_shm_info[epidx].es_status =
info[epidx].es_status;
hw->ep_shm_info[epidx].zone =
info[epidx].zone;
continue;
}
pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
switch (pstatus) {
case EP_PARTNER_UNSHARE:
default:
if ((info[epidx].zone !=
FJES_ZONING_ZONE_TYPE_NONE) &&
(info[epidx].es_status ==
FJES_ZONING_STATUS_ENABLE) &&
(info[epidx].zone ==
info[hw->my_epid].zone))
set_bit(epidx, &share_bit);
else
set_bit(epidx, &unshare_bit);
break;
case EP_PARTNER_COMPLETE:
case EP_PARTNER_WAITING:
if ((info[epidx].zone ==
FJES_ZONING_ZONE_TYPE_NONE) ||
(info[epidx].es_status !=
FJES_ZONING_STATUS_ENABLE) ||
(info[epidx].zone !=
info[hw->my_epid].zone)) {
set_bit(epidx,
&adapter->unshare_watch_bitmask);
set_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit);
}
break;
case EP_PARTNER_SHARED:
if ((info[epidx].zone ==
FJES_ZONING_ZONE_TYPE_NONE) ||
(info[epidx].es_status !=
FJES_ZONING_STATUS_ENABLE) ||
(info[epidx].zone !=
info[hw->my_epid].zone))
set_bit(epidx, &irq_bit);
break;
}
hw->ep_shm_info[epidx].es_status =
info[epidx].es_status;
hw->ep_shm_info[epidx].zone = info[epidx].zone;
}
break;
}
mutex_unlock(&hw->hw_info.lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
if (test_bit(epidx, &share_bit)) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_register_buff_addr(
hw, epidx, &hw->ep_shm_info[epidx]);
switch (ret) {
case 0:
break;
case -ENOMSG:
case -EBUSY:
default:
if (!work_pending(&adapter->force_close_task)) {
adapter->force_reset = true;
schedule_work(
&adapter->force_close_task);
}
break;
}
mutex_unlock(&hw->hw_info.lock);
hw->ep_shm_info[epidx].ep_stats
.com_regist_buf_exec += 1;
}
if (test_bit(epidx, &unshare_bit)) {
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_unregister_buff_addr(hw, epidx);
switch (ret) {
case 0:
break;
case -ENOMSG:
case -EBUSY:
default:
if (!work_pending(&adapter->force_close_task)) {
adapter->force_reset = true;
schedule_work(
&adapter->force_close_task);
}
break;
}
mutex_unlock(&hw->hw_info.lock);
hw->ep_shm_info[epidx].ep_stats
.com_unregist_buf_exec += 1;
if (ret == 0) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock,
flags);
}
}
if (test_bit(epidx, &irq_bit)) {
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_TXRX_STOP_REQ);
hw->ep_shm_info[epidx].ep_stats.send_intr_unshare += 1;
set_bit(epidx, &hw->txrx_stop_req_bit);
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.
info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
}
}
if (irq_bit || adapter->unshare_watch_bitmask) {
if (!work_pending(&adapter->unshare_watch_task))
queue_work(adapter->control_wq,
&adapter->unshare_watch_task);
}
}
static void fjes_hw_epstop_task(struct work_struct *work)
{
struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
unsigned long flags;
ulong remain_bit;
int epid_bit;
while ((remain_bit = hw->epstop_req_bit)) {
for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
if (remain_bit & 1) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epid_bit].
tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock,
flags);
clear_bit(epid_bit, &hw->epstop_req_bit);
set_bit(epid_bit,
&adapter->unshare_watch_bitmask);
if (!work_pending(&adapter->unshare_watch_task))
queue_work(
adapter->control_wq,
&adapter->unshare_watch_task);
}
}
}
}
int fjes_hw_start_debug(struct fjes_hw *hw)
{
union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
enum fjes_dev_command_response_e ret;
int page_count;
int result = 0;
void *addr;
int i;
if (!hw->hw_info.trace)
return -EPERM;
memset(hw->hw_info.trace, 0, FJES_DEBUG_BUFFER_SIZE);
memset(req_buf, 0, hw->hw_info.req_buf_size);
memset(res_buf, 0, hw->hw_info.res_buf_size);
req_buf->start_trace.length =
FJES_DEV_COMMAND_START_DBG_REQ_LEN(hw->hw_info.trace_size);
req_buf->start_trace.mode = hw->debug_mode;
req_buf->start_trace.buffer_len = hw->hw_info.trace_size;
page_count = hw->hw_info.trace_size / FJES_DEBUG_PAGE_SIZE;
for (i = 0; i < page_count; i++) {
addr = ((u8 *)hw->hw_info.trace) + i * FJES_DEBUG_PAGE_SIZE;
req_buf->start_trace.buffer[i] =
(__le64)(page_to_phys(vmalloc_to_page(addr)) +
offset_in_page(addr));
}
res_buf->start_trace.length = 0;
res_buf->start_trace.code = 0;
trace_fjes_hw_start_debug_req(req_buf);
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_START_DEBUG);
trace_fjes_hw_start_debug(res_buf);
if (res_buf->start_trace.length !=
FJES_DEV_COMMAND_START_DBG_RES_LEN) {
result = -ENOMSG;
trace_fjes_hw_start_debug_err("Invalid res_buf");
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->start_trace.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
break;
default:
result = -EPERM;
break;
}
} else {
switch (ret) {
case FJES_CMD_STATUS_UNKNOWN:
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
trace_fjes_hw_start_debug_err("Busy Timeout");
result = -EBUSY;
break;
case FJES_CMD_STATUS_ERROR_PARAM:
case FJES_CMD_STATUS_ERROR_STATUS:
default:
result = -EPERM;
break;
}
}
return result;
}
int fjes_hw_stop_debug(struct fjes_hw *hw)
{
union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
enum fjes_dev_command_response_e ret;
int result = 0;
if (!hw->hw_info.trace)
return -EPERM;
memset(req_buf, 0, hw->hw_info.req_buf_size);
memset(res_buf, 0, hw->hw_info.res_buf_size);
req_buf->stop_trace.length = FJES_DEV_COMMAND_STOP_DBG_REQ_LEN;
res_buf->stop_trace.length = 0;
res_buf->stop_trace.code = 0;
ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_STOP_DEBUG);
trace_fjes_hw_stop_debug(res_buf);
if (res_buf->stop_trace.length != FJES_DEV_COMMAND_STOP_DBG_RES_LEN) {
trace_fjes_hw_stop_debug_err("Invalid res_buf");
result = -ENOMSG;
} else if (ret == FJES_CMD_STATUS_NORMAL) {
switch (res_buf->stop_trace.code) {
case FJES_CMD_REQ_RES_CODE_NORMAL:
result = 0;
hw->debug_mode = 0;
break;
default:
result = -EPERM;
break;
}
} else {
switch (ret) {
case FJES_CMD_STATUS_UNKNOWN:
result = -EPERM;
break;
case FJES_CMD_STATUS_TIMEOUT:
result = -EBUSY;
trace_fjes_hw_stop_debug_err("Busy Timeout");
break;
case FJES_CMD_STATUS_ERROR_PARAM:
case FJES_CMD_STATUS_ERROR_STATUS:
default:
result = -EPERM;
break;
}
}
return result;
}
|
linux-master
|
drivers/net/fjes/fjes_hw.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* FUJITSU Extended Socket Network Device driver
* Copyright (c) 2015 FUJITSU LIMITED
*/
/* ethtool support for fjes */
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include "fjes.h"
struct fjes_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define FJES_STAT(name, stat) { \
.stat_string = name, \
.sizeof_stat = sizeof_field(struct fjes_adapter, stat), \
.stat_offset = offsetof(struct fjes_adapter, stat) \
}
static const struct fjes_stats fjes_gstrings_stats[] = {
FJES_STAT("rx_packets", stats64.rx_packets),
FJES_STAT("tx_packets", stats64.tx_packets),
FJES_STAT("rx_bytes", stats64.rx_bytes),
FJES_STAT("tx_bytes", stats64.rx_bytes),
FJES_STAT("rx_dropped", stats64.rx_dropped),
FJES_STAT("tx_dropped", stats64.tx_dropped),
};
#define FJES_EP_STATS_LEN 14
#define FJES_STATS_LEN \
(ARRAY_SIZE(fjes_gstrings_stats) + \
((&((struct fjes_adapter *)netdev_priv(netdev))->hw)->max_epid - 1) * \
FJES_EP_STATS_LEN)
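/* Total stats count: the six global counters above plus FJES_EP_STATS_LEN (14)
 * per-peer counters for every endpoint except our own.  This must match the
 * number of values pushed in fjes_get_ethtool_stats() and the strings emitted
 * in fjes_get_strings() below.
 */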
static void fjes_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
int epidx;
char *p;
int i;
for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
p = (char *)adapter + fjes_gstrings_stats[i].stat_offset;
data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64))
? *(u64 *)p : *(u32 *)p;
}
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.com_regist_buf_exec;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.com_unregist_buf_exec;
data[i++] = hw->ep_shm_info[epidx].ep_stats.send_intr_rx;
data[i++] = hw->ep_shm_info[epidx].ep_stats.send_intr_unshare;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.send_intr_zoneupdate;
data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_rx;
data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_unshare;
data[i++] = hw->ep_shm_info[epidx].ep_stats.recv_intr_stop;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.recv_intr_zoneupdate;
data[i++] = hw->ep_shm_info[epidx].ep_stats.tx_buffer_full;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.tx_dropped_not_shared;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.tx_dropped_ver_mismatch;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.tx_dropped_buf_size_mismatch;
data[i++] = hw->ep_shm_info[epidx].ep_stats
.tx_dropped_vlanid_mismatch;
}
}
static void fjes_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
memcpy(p, fjes_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < hw->max_epid; i++) {
if (i == hw->my_epid)
continue;
sprintf(p, "ep%u_com_regist_buf_exec", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_com_unregist_buf_exec", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_send_intr_rx", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_send_intr_unshare", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_send_intr_zoneupdate", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_recv_intr_rx", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_recv_intr_unshare", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_recv_intr_stop", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_recv_intr_zoneupdate", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_tx_buffer_full", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_tx_dropped_not_shared", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_tx_dropped_ver_mismatch", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_tx_dropped_buf_size_mismatch", i);
p += ETH_GSTRING_LEN;
sprintf(p, "ep%u_tx_dropped_vlanid_mismatch", i);
p += ETH_GSTRING_LEN;
}
break;
}
}
static int fjes_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return FJES_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}
static void fjes_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct platform_device *plat_dev;
plat_dev = adapter->plat_dev;
strscpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
strscpy(drvinfo->version, fjes_driver_version,
sizeof(drvinfo->version));
strscpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"platform:%s", plat_dev->name);
}
static int fjes_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
ethtool_link_ksettings_zero_link_mode(ecmd, supported);
ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
ecmd->base.duplex = DUPLEX_FULL;
ecmd->base.autoneg = AUTONEG_DISABLE;
ecmd->base.port = PORT_NONE;
ecmd->base.speed = 20000; /* 20Gb/s */
return 0;
}
static int fjes_get_regs_len(struct net_device *netdev)
{
#define FJES_REGS_LEN 37
return FJES_REGS_LEN * sizeof(u32);
}
static void fjes_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
u32 *regs_buff = p;
memset(p, 0, FJES_REGS_LEN * sizeof(u32));
regs->version = 1;
/* Information registers */
regs_buff[0] = rd32(XSCT_OWNER_EPID);
regs_buff[1] = rd32(XSCT_MAX_EP);
/* Device Control registers */
regs_buff[4] = rd32(XSCT_DCTL);
/* Command Control registers */
regs_buff[8] = rd32(XSCT_CR);
regs_buff[9] = rd32(XSCT_CS);
regs_buff[10] = rd32(XSCT_SHSTSAL);
regs_buff[11] = rd32(XSCT_SHSTSAH);
regs_buff[13] = rd32(XSCT_REQBL);
regs_buff[14] = rd32(XSCT_REQBAL);
regs_buff[15] = rd32(XSCT_REQBAH);
regs_buff[17] = rd32(XSCT_RESPBL);
regs_buff[18] = rd32(XSCT_RESPBAL);
regs_buff[19] = rd32(XSCT_RESPBAH);
/* Interrupt Control registers */
regs_buff[32] = rd32(XSCT_IS);
regs_buff[33] = rd32(XSCT_IMS);
regs_buff[34] = rd32(XSCT_IMC);
regs_buff[35] = rd32(XSCT_IG);
regs_buff[36] = rd32(XSCT_ICTL);
}
static int fjes_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
int ret = 0;
if (dump->flag) {
if (hw->debug_mode)
return -EPERM;
hw->debug_mode = dump->flag;
/* enable debug mode */
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_start_debug(hw);
mutex_unlock(&hw->hw_info.lock);
if (ret)
hw->debug_mode = 0;
} else {
if (!hw->debug_mode)
return -EPERM;
/* disable debug mode */
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_stop_debug(hw);
mutex_unlock(&hw->hw_info.lock);
}
return ret;
}
static int fjes_get_dump_flag(struct net_device *netdev,
struct ethtool_dump *dump)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
dump->len = hw->hw_info.trace_size;
dump->version = 1;
dump->flag = hw->debug_mode;
return 0;
}
static int fjes_get_dump_data(struct net_device *netdev,
struct ethtool_dump *dump, void *buf)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
int ret = 0;
if (hw->hw_info.trace)
memcpy(buf, hw->hw_info.trace, hw->hw_info.trace_size);
else
ret = -EPERM;
return ret;
}
static const struct ethtool_ops fjes_ethtool_ops = {
.get_drvinfo = fjes_get_drvinfo,
.get_ethtool_stats = fjes_get_ethtool_stats,
.get_strings = fjes_get_strings,
.get_sset_count = fjes_get_sset_count,
.get_regs = fjes_get_regs,
.get_regs_len = fjes_get_regs_len,
.set_dump = fjes_set_dump,
.get_dump_flag = fjes_get_dump_flag,
.get_dump_data = fjes_get_dump_data,
.get_link_ksettings = fjes_get_link_ksettings,
};
void fjes_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &fjes_ethtool_ops;
}
|
linux-master
|
drivers/net/fjes/fjes_ethtool.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* FUJITSU Extended Socket Network Device driver
* Copyright (c) 2015-2016 FUJITSU LIMITED
*/
/* debugfs support for fjes driver */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include "fjes.h"
static struct dentry *fjes_debug_root;
static const char * const ep_status_string[] = {
"unshared",
"shared",
"waiting",
"complete",
};
static int fjes_dbg_status_show(struct seq_file *m, void *v)
{
struct fjes_adapter *adapter = m->private;
struct fjes_hw *hw = &adapter->hw;
int max_epid = hw->max_epid;
int my_epid = hw->my_epid;
int epidx;
seq_puts(m, "EPID\tSTATUS SAME_ZONE CONNECTED\n");
for (epidx = 0; epidx < max_epid; epidx++) {
if (epidx == my_epid) {
seq_printf(m, "ep%d\t%-16c %-16c %-16c\n",
epidx, '-', '-', '-');
} else {
seq_printf(m, "ep%d\t%-16s %-16c %-16c\n",
epidx,
ep_status_string[fjes_hw_get_partner_ep_status(hw, epidx)],
fjes_hw_epid_is_same_zone(hw, epidx) ? 'Y' : 'N',
fjes_hw_epid_is_shared(hw->hw_info.share, epidx) ? 'Y' : 'N');
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(fjes_dbg_status);
void fjes_dbg_adapter_init(struct fjes_adapter *adapter)
{
const char *name = dev_name(&adapter->plat_dev->dev);
adapter->dbg_adapter = debugfs_create_dir(name, fjes_debug_root);
debugfs_create_file("status", 0444, adapter->dbg_adapter, adapter,
&fjes_dbg_status_fops);
}
void fjes_dbg_adapter_exit(struct fjes_adapter *adapter)
{
debugfs_remove_recursive(adapter->dbg_adapter);
adapter->dbg_adapter = NULL;
}
void fjes_dbg_init(void)
{
fjes_debug_root = debugfs_create_dir(fjes_driver_name, NULL);
}
void fjes_dbg_exit(void)
{
debugfs_remove_recursive(fjes_debug_root);
fjes_debug_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */
|
linux-master
|
drivers/net/fjes/fjes_debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* FUJITSU Extended Socket Network Device driver
* Copyright (c) 2015 FUJITSU LIMITED
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/nls.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include "fjes.h"
#include "fjes_trace.h"
#define MAJ 1
#define MIN 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
static const char fjes_driver_string[] =
"FUJITSU Extended Socket Network Device Driver";
static const char fjes_copyright[] =
"Copyright (c) 2015 FUJITSU LIMITED";
MODULE_AUTHOR("Taku Izumi <[email protected]>");
MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02"
static const struct acpi_device_id fjes_acpi_ids[] = {
{ACPI_MOTHERBOARD_RESOURCE_HID, 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
static bool is_extended_socket_device(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
union acpi_object *str;
acpi_status status;
int result;
status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
if (ACPI_FAILURE(status))
return false;
str = buffer.pointer;
result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
str->string.length, UTF16_LITTLE_ENDIAN,
str_buf, sizeof(str_buf) - 1);
str_buf[result] = 0;
if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
kfree(buffer.pointer);
return false;
}
kfree(buffer.pointer);
return true;
}
static int acpi_check_extended_socket_status(struct acpi_device *device)
{
unsigned long long sta;
acpi_status status;
status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
(sta & ACPI_STA_DEVICE_ENABLED) &&
(sta & ACPI_STA_DEVICE_UI) &&
(sta & ACPI_STA_DEVICE_FUNCTIONING)))
return -ENODEV;
return 0;
}
static acpi_status
fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
{
struct acpi_resource_address32 *addr;
struct acpi_resource_irq *irq;
struct resource *res = data;
switch (acpi_res->type) {
case ACPI_RESOURCE_TYPE_ADDRESS32:
addr = &acpi_res->data.address32;
res[0].start = addr->address.minimum;
res[0].end = addr->address.minimum +
addr->address.address_length - 1;
break;
case ACPI_RESOURCE_TYPE_IRQ:
irq = &acpi_res->data.irq;
if (irq->interrupt_count != 1)
return AE_ERROR;
res[1].start = irq->interrupts[0];
res[1].end = irq->interrupts[0];
break;
default:
break;
}
return AE_OK;
}
static struct resource fjes_resource[] = {
DEFINE_RES_MEM(0, 1),
DEFINE_RES_IRQ(0)
};
static int fjes_acpi_add(struct acpi_device *device)
{
struct platform_device *plat_dev;
acpi_status status;
if (!is_extended_socket_device(device))
return -ENODEV;
if (acpi_check_extended_socket_status(device))
return -ENODEV;
status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
fjes_get_acpi_resource, fjes_resource);
if (ACPI_FAILURE(status))
return -ENODEV;
/* create platform_device */
plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
ARRAY_SIZE(fjes_resource));
if (IS_ERR(plat_dev))
return PTR_ERR(plat_dev);
device->driver_data = plat_dev;
return 0;
}
static void fjes_acpi_remove(struct acpi_device *device)
{
struct platform_device *plat_dev;
plat_dev = (struct platform_device *)acpi_driver_data(device);
platform_device_unregister(plat_dev);
}
static struct acpi_driver fjes_acpi_driver = {
.name = DRV_NAME,
.class = DRV_NAME,
.owner = THIS_MODULE,
.ids = fjes_acpi_ids,
.ops = {
.add = fjes_acpi_add,
.remove = fjes_acpi_remove,
},
};
static int fjes_setup_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
unsigned long flags;
int result;
int epidx;
mutex_lock(&hw->hw_info.lock);
result = fjes_hw_request_info(hw);
switch (result) {
case 0:
for (epidx = 0; epidx < hw->max_epid; epidx++) {
hw->ep_shm_info[epidx].es_status =
hw->hw_info.res_buf->info.info[epidx].es_status;
hw->ep_shm_info[epidx].zone =
hw->hw_info.res_buf->info.info[epidx].zone;
}
break;
default:
case -ENOMSG:
case -EBUSY:
adapter->force_reset = true;
mutex_unlock(&hw->hw_info.lock);
return result;
}
mutex_unlock(&hw->hw_info.lock);
for (epidx = 0; epidx < (hw->max_epid); epidx++) {
if ((epidx != hw->my_epid) &&
(hw->ep_shm_info[epidx].es_status ==
FJES_ZONING_STATUS_ENABLE)) {
fjes_hw_raise_interrupt(hw, epidx,
REG_ICTL_MASK_INFO_UPDATE);
hw->ep_shm_info[epidx].ep_stats
.send_intr_zoneupdate += 1;
}
}
msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
for (epidx = 0; epidx < (hw->max_epid); epidx++) {
if (epidx == hw->my_epid)
continue;
buf_pair = &hw->ep_shm_info[epidx];
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
if (fjes_hw_epid_is_same_zone(hw, epidx)) {
mutex_lock(&hw->hw_info.lock);
result =
fjes_hw_register_buff_addr(hw, epidx, buf_pair);
mutex_unlock(&hw->hw_info.lock);
switch (result) {
case 0:
break;
case -ENOMSG:
case -EBUSY:
default:
adapter->force_reset = true;
return result;
}
hw->ep_shm_info[epidx].ep_stats
.com_regist_buf_exec += 1;
}
}
return 0;
}
static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
adapter->unset_rx_last = true;
napi_schedule(&adapter->napi);
}
static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
unsigned long flags;
set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
status = fjes_hw_get_partner_ep_status(hw, src_epid);
trace_fjes_stop_req_irq_pre(hw, src_epid, status);
switch (status) {
case EP_PARTNER_WAITING:
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
fallthrough;
case EP_PARTNER_UNSHARE:
case EP_PARTNER_COMPLETE:
default:
set_bit(src_epid, &adapter->unshare_watch_bitmask);
if (!work_pending(&adapter->unshare_watch_task))
queue_work(adapter->control_wq,
&adapter->unshare_watch_task);
break;
case EP_PARTNER_SHARED:
set_bit(src_epid, &hw->epstop_req_bit);
if (!work_pending(&hw->epstop_task))
queue_work(adapter->control_wq, &hw->epstop_task);
break;
}
trace_fjes_stop_req_irq_post(hw, src_epid);
}
static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
unsigned long flags;
status = fjes_hw_get_partner_ep_status(hw, src_epid);
trace_fjes_txrx_stop_req_irq_pre(hw, src_epid, status);
switch (status) {
case EP_PARTNER_UNSHARE:
case EP_PARTNER_COMPLETE:
default:
break;
case EP_PARTNER_WAITING:
if (src_epid < hw->my_epid) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
set_bit(src_epid, &adapter->unshare_watch_bitmask);
if (!work_pending(&adapter->unshare_watch_task))
queue_work(adapter->control_wq,
&adapter->unshare_watch_task);
}
break;
case EP_PARTNER_SHARED:
if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
FJES_RX_STOP_REQ_REQUEST) {
set_bit(src_epid, &hw->epstop_req_bit);
if (!work_pending(&hw->epstop_task))
queue_work(adapter->control_wq,
&hw->epstop_task);
}
break;
}
trace_fjes_txrx_stop_req_irq_post(hw, src_epid);
}
static void fjes_update_zone_irq(struct fjes_adapter *adapter,
int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
if (!work_pending(&hw->update_zone_task))
queue_work(adapter->control_wq, &hw->update_zone_task);
}
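/* fjes_intr - Shared interrupt handler; dispatches on the captured ICR bits */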
static irqreturn_t fjes_intr(int irq, void *data)
{
struct fjes_adapter *adapter = data;
struct fjes_hw *hw = &adapter->hw;
irqreturn_t ret;
u32 icr;
icr = fjes_hw_capture_interrupt_status(hw);
if (icr & REG_IS_MASK_IS_ASSERT) {
if (icr & REG_ICTL_MASK_RX_DATA) {
fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
.recv_intr_rx += 1;
}
if (icr & REG_ICTL_MASK_DEV_STOP_REQ) {
fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
.recv_intr_stop += 1;
}
if (icr & REG_ICTL_MASK_TXRX_STOP_REQ) {
fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
.recv_intr_unshare += 1;
}
if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
fjes_hw_set_irqmask(hw,
REG_ICTL_MASK_TXRX_STOP_DONE, true);
if (icr & REG_ICTL_MASK_INFO_UPDATE) {
fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
hw->ep_shm_info[icr & REG_IS_MASK_EPID].ep_stats
.recv_intr_zoneupdate += 1;
}
ret = IRQ_HANDLED;
} else {
ret = IRQ_NONE;
}
return ret;
}
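/* fjes_request_irq - Arms the interrupt watchdog and registers the shared IRQ handler */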
static int fjes_request_irq(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int result = -1;
adapter->interrupt_watch_enable = true;
if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
queue_delayed_work(adapter->control_wq,
&adapter->interrupt_watch_task,
FJES_IRQ_WATCH_DELAY);
}
if (!adapter->irq_registered) {
result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
IRQF_SHARED, netdev->name, adapter);
if (result)
adapter->irq_registered = false;
else
adapter->irq_registered = true;
}
return result;
}
static void fjes_free_irq(struct fjes_adapter *adapter)
{
struct fjes_hw *hw = &adapter->hw;
adapter->interrupt_watch_enable = false;
cancel_delayed_work_sync(&adapter->interrupt_watch_task);
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
if (adapter->irq_registered) {
free_irq(adapter->hw.hw_res.irq, adapter);
adapter->irq_registered = false;
}
}
static void fjes_free_resources(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct fjes_device_command_param param;
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
bool reset_flag = false;
unsigned long flags;
int result;
int epidx;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
mutex_lock(&hw->hw_info.lock);
result = fjes_hw_unregister_buff_addr(hw, epidx);
mutex_unlock(&hw->hw_info.lock);
hw->ep_shm_info[epidx].ep_stats.com_unregist_buf_exec += 1;
if (result)
reset_flag = true;
buf_pair = &hw->ep_shm_info[epidx];
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx,
netdev->dev_addr, netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
}
if (reset_flag || adapter->force_reset) {
result = fjes_hw_reset(hw);
adapter->force_reset = false;
if (result)
adapter->open_guard = true;
hw->hw_info.buffer_share_bit = 0;
memset((void *)&param, 0, sizeof(param));
param.req_len = hw->hw_info.req_buf_size;
param.req_start = __pa(hw->hw_info.req_buf);
param.res_len = hw->hw_info.res_buf_size;
param.res_start = __pa(hw->hw_info.res_buf);
param.share_start = __pa(hw->hw_info.share->ep_status);
fjes_hw_init_command_registers(hw, &param);
}
}
/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
int result;
if (adapter->open_guard)
return -ENXIO;
result = fjes_setup_resources(adapter);
if (result)
goto err_setup_res;
hw->txrx_stop_req_bit = 0;
hw->epstop_req_bit = 0;
napi_enable(&adapter->napi);
fjes_hw_capture_interrupt_status(hw);
result = fjes_request_irq(adapter);
if (result)
goto err_req_irq;
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
netif_tx_start_all_queues(netdev);
netif_carrier_on(netdev);
return 0;
err_req_irq:
fjes_free_irq(adapter);
napi_disable(&adapter->napi);
err_setup_res:
fjes_free_resources(adapter);
return result;
}
/* fjes_close - Disables a network interface */
static int fjes_close(struct net_device *netdev)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
unsigned long flags;
int epidx;
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
fjes_hw_raise_epstop(hw);
napi_disable(&adapter->napi);
spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
if (fjes_hw_get_partner_ep_status(hw, epidx) ==
EP_PARTNER_SHARED)
adapter->hw.ep_shm_info[epidx]
.tx.info->v1i.rx_status &=
~FJES_RX_POLL_WORK;
}
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
fjes_free_irq(adapter);
cancel_delayed_work_sync(&adapter->interrupt_watch_task);
cancel_work_sync(&adapter->unshare_watch_task);
adapter->unshare_watch_bitmask = 0;
cancel_work_sync(&adapter->raise_intr_rxdata_task);
cancel_work_sync(&adapter->tx_stall_task);
cancel_work_sync(&hw->update_zone_task);
cancel_work_sync(&hw->epstop_task);
fjes_hw_wait_epstop(hw);
fjes_free_resources(adapter);
return 0;
}
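/* fjes_tx_send - Copies a frame into the destination EP's shared TX buffer */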
static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
void *data, size_t len)
{
int retval;
retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
data, len);
if (retval)
return retval;
adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
FJES_TX_DELAY_SEND_PENDING;
if (!work_pending(&adapter->raise_intr_rxdata_task))
queue_work(adapter->txrx_wq,
&adapter->raise_intr_rxdata_task);
retval = 0;
return retval;
}
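/* fjes_xmit_frame - Transmits over shared memory; multicast walks every shared EP */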
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
int max_epid, my_epid, dest_epid;
enum ep_partner_status pstatus;
struct netdev_queue *cur_queue;
char shortpkt[VLAN_ETH_HLEN];
bool is_multi, vlan;
struct ethhdr *eth;
u16 queue_no = 0;
u16 vlan_id = 0;
netdev_tx_t ret;
char *data;
int len;
ret = NETDEV_TX_OK;
is_multi = false;
cur_queue = netdev_get_tx_queue(netdev, queue_no);
eth = (struct ethhdr *)skb->data;
my_epid = hw->my_epid;
vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;
data = skb->data;
len = skb->len;
if (is_multicast_ether_addr(eth->h_dest)) {
dest_epid = 0;
max_epid = hw->max_epid;
is_multi = true;
} else if (is_local_ether_addr(eth->h_dest)) {
dest_epid = eth->h_dest[ETH_ALEN - 1];
max_epid = dest_epid + 1;
if ((eth->h_dest[0] == 0x02) &&
(0x00 == (eth->h_dest[1] | eth->h_dest[2] |
eth->h_dest[3] | eth->h_dest[4])) &&
(dest_epid < hw->max_epid)) {
;
} else {
dest_epid = 0;
max_epid = 0;
ret = NETDEV_TX_OK;
adapter->stats64.tx_packets += 1;
hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
adapter->stats64.tx_bytes += len;
hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
}
} else {
dest_epid = 0;
max_epid = 0;
ret = NETDEV_TX_OK;
adapter->stats64.tx_packets += 1;
hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
adapter->stats64.tx_bytes += len;
hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
}
for (; dest_epid < max_epid; dest_epid++) {
if (my_epid == dest_epid)
continue;
pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
if (pstatus != EP_PARTNER_SHARED) {
if (!is_multi)
hw->ep_shm_info[dest_epid].ep_stats
.tx_dropped_not_shared += 1;
ret = NETDEV_TX_OK;
} else if (!fjes_hw_check_epbuf_version(
&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
/* version is NOT 0 */
adapter->stats64.tx_carrier_errors += 1;
hw->ep_shm_info[dest_epid].net_stats
.tx_carrier_errors += 1;
hw->ep_shm_info[dest_epid].ep_stats
.tx_dropped_ver_mismatch += 1;
ret = NETDEV_TX_OK;
} else if (!fjes_hw_check_mtu(
&adapter->hw.ep_shm_info[dest_epid].rx,
netdev->mtu)) {
adapter->stats64.tx_dropped += 1;
hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
adapter->stats64.tx_errors += 1;
hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
hw->ep_shm_info[dest_epid].ep_stats
.tx_dropped_buf_size_mismatch += 1;
ret = NETDEV_TX_OK;
} else if (vlan &&
!fjes_hw_check_vlan_id(
&adapter->hw.ep_shm_info[dest_epid].rx,
vlan_id)) {
hw->ep_shm_info[dest_epid].ep_stats
.tx_dropped_vlanid_mismatch += 1;
ret = NETDEV_TX_OK;
} else {
if (len < VLAN_ETH_HLEN) {
memset(shortpkt, 0, VLAN_ETH_HLEN);
memcpy(shortpkt, skb->data, skb->len);
len = VLAN_ETH_HLEN;
data = shortpkt;
}
if (adapter->tx_retry_count == 0) {
adapter->tx_start_jiffies = jiffies;
adapter->tx_retry_count = 1;
} else {
adapter->tx_retry_count++;
}
if (fjes_tx_send(adapter, dest_epid, data, len)) {
if (is_multi) {
ret = NETDEV_TX_OK;
} else if (
((long)jiffies -
(long)adapter->tx_start_jiffies) >=
FJES_TX_RETRY_TIMEOUT) {
adapter->stats64.tx_fifo_errors += 1;
hw->ep_shm_info[dest_epid].net_stats
.tx_fifo_errors += 1;
adapter->stats64.tx_errors += 1;
hw->ep_shm_info[dest_epid].net_stats
.tx_errors += 1;
ret = NETDEV_TX_OK;
} else {
netif_trans_update(netdev);
hw->ep_shm_info[dest_epid].ep_stats
.tx_buffer_full += 1;
netif_tx_stop_queue(cur_queue);
if (!work_pending(&adapter->tx_stall_task))
queue_work(adapter->txrx_wq,
&adapter->tx_stall_task);
ret = NETDEV_TX_BUSY;
}
} else {
if (!is_multi) {
adapter->stats64.tx_packets += 1;
hw->ep_shm_info[dest_epid].net_stats
.tx_packets += 1;
adapter->stats64.tx_bytes += len;
hw->ep_shm_info[dest_epid].net_stats
.tx_bytes += len;
}
adapter->tx_retry_count = 0;
ret = NETDEV_TX_OK;
}
}
}
if (ret == NETDEV_TX_OK) {
dev_kfree_skb(skb);
if (is_multi) {
adapter->stats64.tx_packets += 1;
hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
adapter->stats64.tx_bytes += len;
hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
}
}
return ret;
}
static void
fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
}
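/* fjes_change_mtu - Rounds up to the nearest supported MTU and resyncs the shared buffers */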
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
bool running = netif_running(netdev);
struct fjes_hw *hw = &adapter->hw;
unsigned long flags;
int ret = -EINVAL;
int idx, epidx;
for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
if (new_mtu <= fjes_support_mtu[idx]) {
new_mtu = fjes_support_mtu[idx];
if (new_mtu == netdev->mtu)
return 0;
ret = 0;
break;
}
}
if (ret)
return ret;
if (running) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_MTU_CHANGING_DONE;
}
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
cancel_work_sync(&adapter->tx_stall_task);
napi_disable(&adapter->napi);
msleep(1000);
netif_tx_stop_all_queues(netdev);
}
netdev->mtu = new_mtu;
if (running) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr,
netdev->mtu);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_MTU_CHANGING_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
netif_tx_wake_all_queues(netdev);
netif_carrier_on(netdev);
napi_enable(&adapter->napi);
napi_schedule(&adapter->napi);
}
return ret;
}
static void fjes_tx_retry(struct net_device *netdev, unsigned int txqueue)
{
struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
netif_tx_wake_queue(queue);
}
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
bool ret = true;
int epid;
for (epid = 0; epid < adapter->hw.max_epid; epid++) {
if (epid == adapter->hw.my_epid)
continue;
if (!fjes_hw_check_vlan_id(
&adapter->hw.ep_shm_info[epid].tx, vid))
ret = fjes_hw_set_vlan_id(
&adapter->hw.ep_shm_info[epid].tx, vid);
}
return ret ? 0 : -ENOSPC;
}
static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
int epid;
for (epid = 0; epid < adapter->hw.max_epid; epid++) {
if (epid == adapter->hw.my_epid)
continue;
fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
}
return 0;
}
static const struct net_device_ops fjes_netdev_ops = {
.ndo_open = fjes_open,
.ndo_stop = fjes_close,
.ndo_start_xmit = fjes_xmit_frame,
.ndo_get_stats64 = fjes_get_stats64,
.ndo_change_mtu = fjes_change_mtu,
.ndo_tx_timeout = fjes_tx_retry,
.ndo_vlan_rx_add_vid = fjes_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
};
/* fjes_netdev_setup - netdevice initialization routine */
static void fjes_netdev_setup(struct net_device *netdev)
{
ether_setup(netdev);
netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
netdev->netdev_ops = &fjes_netdev_ops;
fjes_set_ethtool_ops(netdev);
netdev->mtu = fjes_support_mtu[3];
netdev->min_mtu = fjes_support_mtu[0];
netdev->max_mtu = fjes_support_mtu[3];
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
int start_epid)
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status pstatus;
int max_epid, cur_epid;
int i;
max_epid = hw->max_epid;
start_epid = (start_epid + 1 + max_epid) % max_epid;
for (i = 0; i < max_epid; i++) {
cur_epid = (start_epid + i) % max_epid;
if (cur_epid == hw->my_epid)
continue;
pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
if (pstatus == EP_PARTNER_SHARED) {
if (!fjes_hw_epbuf_rx_is_empty(
&hw->ep_shm_info[cur_epid].rx))
return cur_epid;
}
}
return -1;
}
static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
int *cur_epid)
{
void *frame;
*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
if (*cur_epid < 0)
return NULL;
frame =
fjes_hw_epbuf_rx_curpkt_get_addr(
&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
return frame;
}
static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
{
fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
}
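/* fjes_poll - NAPI poll handler; drains partner EPs' shared RX buffers up to the budget */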
static int fjes_poll(struct napi_struct *napi, int budget)
{
struct fjes_adapter *adapter =
container_of(napi, struct fjes_adapter, napi);
struct net_device *netdev = napi->dev;
struct fjes_hw *hw = &adapter->hw;
struct sk_buff *skb;
int work_done = 0;
int cur_epid = 0;
int epidx;
size_t frame_len;
void *frame;
spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
if (fjes_hw_get_partner_ep_status(hw, epidx) ==
EP_PARTNER_SHARED)
adapter->hw.ep_shm_info[epidx]
.tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
}
spin_unlock(&hw->rx_status_lock);
while (work_done < budget) {
prefetch(&adapter->hw);
frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
if (frame) {
skb = napi_alloc_skb(napi, frame_len);
if (!skb) {
adapter->stats64.rx_dropped += 1;
hw->ep_shm_info[cur_epid].net_stats
.rx_dropped += 1;
adapter->stats64.rx_errors += 1;
hw->ep_shm_info[cur_epid].net_stats
.rx_errors += 1;
} else {
skb_put_data(skb, frame, frame_len);
skb->protocol = eth_type_trans(skb, netdev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_receive_skb(skb);
work_done++;
adapter->stats64.rx_packets += 1;
hw->ep_shm_info[cur_epid].net_stats
.rx_packets += 1;
adapter->stats64.rx_bytes += frame_len;
hw->ep_shm_info[cur_epid].net_stats
.rx_bytes += frame_len;
if (is_multicast_ether_addr(
((struct ethhdr *)frame)->h_dest)) {
adapter->stats64.multicast += 1;
hw->ep_shm_info[cur_epid].net_stats
.multicast += 1;
}
}
fjes_rxframe_release(adapter, cur_epid);
adapter->unset_rx_last = true;
} else {
break;
}
}
if (work_done < budget) {
napi_complete_done(napi, work_done);
if (adapter->unset_rx_last) {
adapter->rx_last_jiffies = jiffies;
adapter->unset_rx_last = false;
}
if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
napi_reschedule(napi);
} else {
spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
if (fjes_hw_get_partner_ep_status(hw, epidx) ==
EP_PARTNER_SHARED)
adapter->hw.ep_shm_info[epidx].tx
.info->v1i.rx_status &=
~FJES_RX_POLL_WORK;
}
spin_unlock(&hw->rx_status_lock);
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
}
}
return work_done;
}
static int fjes_sw_init(struct fjes_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
netif_napi_add(netdev, &adapter->napi, fjes_poll);
return 0;
}
static void fjes_force_close_task(struct work_struct *work)
{
struct fjes_adapter *adapter = container_of(work,
struct fjes_adapter, force_close_task);
struct net_device *netdev = adapter->netdev;
rtnl_lock();
dev_close(netdev);
rtnl_unlock();
}
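/* fjes_tx_stall_task - Wakes the stopped TX queue once partner rings have room again */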
static void fjes_tx_stall_task(struct work_struct *work)
{
struct fjes_adapter *adapter = container_of(work,
struct fjes_adapter, tx_stall_task);
struct net_device *netdev = adapter->netdev;
struct fjes_hw *hw = &adapter->hw;
int all_queue_available, sendable;
enum ep_partner_status pstatus;
int max_epid, my_epid, epid;
union ep_buffer_info *info;
int i;
if (((long)jiffies -
dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
netif_wake_queue(netdev);
return;
}
my_epid = hw->my_epid;
max_epid = hw->max_epid;
for (i = 0; i < 5; i++) {
all_queue_available = 1;
for (epid = 0; epid < max_epid; epid++) {
if (my_epid == epid)
continue;
pstatus = fjes_hw_get_partner_ep_status(hw, epid);
sendable = (pstatus == EP_PARTNER_SHARED);
if (!sendable)
continue;
info = adapter->hw.ep_shm_info[epid].tx.info;
if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
return;
if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
info->v1i.count_max)) {
all_queue_available = 0;
break;
}
}
if (all_queue_available) {
netif_wake_queue(netdev);
return;
}
}
usleep_range(50, 100);
queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
}
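/* fjes_raise_intr_rxdata_task - Raises RX-data interrupts for partners with pending delayed sends */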
static void fjes_raise_intr_rxdata_task(struct work_struct *work)
{
struct fjes_adapter *adapter = container_of(work,
struct fjes_adapter, raise_intr_rxdata_task);
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status pstatus;
int max_epid, my_epid, epid;
my_epid = hw->my_epid;
max_epid = hw->max_epid;
for (epid = 0; epid < max_epid; epid++)
hw->ep_shm_info[epid].tx_status_work = 0;
for (epid = 0; epid < max_epid; epid++) {
if (epid == my_epid)
continue;
pstatus = fjes_hw_get_partner_ep_status(hw, epid);
if (pstatus == EP_PARTNER_SHARED) {
hw->ep_shm_info[epid].tx_status_work =
hw->ep_shm_info[epid].tx.info->v1i.tx_status;
if (hw->ep_shm_info[epid].tx_status_work ==
FJES_TX_DELAY_SEND_PENDING) {
hw->ep_shm_info[epid].tx.info->v1i.tx_status =
FJES_TX_DELAY_SEND_NONE;
}
}
}
for (epid = 0; epid < max_epid; epid++) {
if (epid == my_epid)
continue;
pstatus = fjes_hw_get_partner_ep_status(hw, epid);
if ((hw->ep_shm_info[epid].tx_status_work ==
FJES_TX_DELAY_SEND_PENDING) &&
(pstatus == EP_PARTNER_SHARED) &&
!(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
FJES_RX_POLL_WORK)) {
fjes_hw_raise_interrupt(hw, epid,
REG_ICTL_MASK_RX_DATA);
hw->ep_shm_info[epid].ep_stats.send_intr_rx += 1;
}
}
usleep_range(500, 1000);
}
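/* fjes_watch_unshare_task - Waits for partners to stop TX/RX, then unregisters their shared buffers */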
static void fjes_watch_unshare_task(struct work_struct *work)
{
struct fjes_adapter *adapter =
container_of(work, struct fjes_adapter, unshare_watch_task);
struct net_device *netdev = adapter->netdev;
struct fjes_hw *hw = &adapter->hw;
int unshare_watch, unshare_reserve;
int max_epid, my_epid, epidx;
int stop_req, stop_req_done;
ulong unshare_watch_bitmask;
unsigned long flags;
int wait_time = 0;
int is_shared;
int ret;
my_epid = hw->my_epid;
max_epid = hw->max_epid;
unshare_watch_bitmask = adapter->unshare_watch_bitmask;
adapter->unshare_watch_bitmask = 0;
while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
(wait_time < 3000)) {
for (epidx = 0; epidx < max_epid; epidx++) {
if (epidx == my_epid)
continue;
is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
epidx);
stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
FJES_RX_STOP_REQ_DONE;
unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
unshare_reserve = test_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit);
if ((!stop_req ||
(is_shared && (!is_shared || !stop_req_done))) &&
(is_shared || !unshare_watch || !unshare_reserve))
continue;
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_unregister_buff_addr(hw, epidx);
switch (ret) {
case 0:
break;
case -ENOMSG:
case -EBUSY:
default:
if (!work_pending(
&adapter->force_close_task)) {
adapter->force_reset = true;
schedule_work(
&adapter->force_close_task);
}
break;
}
mutex_unlock(&hw->hw_info.lock);
hw->ep_shm_info[epidx].ep_stats
.com_unregist_buf_exec += 1;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
clear_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit);
}
msleep(100);
wait_time += 100;
}
if (hw->hw_info.buffer_unshare_reserve_bit) {
for (epidx = 0; epidx < max_epid; epidx++) {
if (epidx == my_epid)
continue;
if (test_bit(epidx,
&hw->hw_info.buffer_unshare_reserve_bit)) {
mutex_lock(&hw->hw_info.lock);
ret = fjes_hw_unregister_buff_addr(hw, epidx);
switch (ret) {
case 0:
break;
case -ENOMSG:
case -EBUSY:
default:
if (!work_pending(
&adapter->force_close_task)) {
adapter->force_reset = true;
schedule_work(
&adapter->force_close_task);
}
break;
}
mutex_unlock(&hw->hw_info.lock);
hw->ep_shm_info[epidx].ep_stats
.com_unregist_buf_exec += 1;
spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
spin_unlock_irqrestore(&hw->rx_status_lock,
flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
}
if (test_bit(epidx, &unshare_watch_bitmask)) {
spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_STOP_REQ_DONE;
spin_unlock_irqrestore(&hw->rx_status_lock,
flags);
}
}
}
}
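/* fjes_irq_watch_task - Periodic safety net that re-runs the interrupt handler in case an IRQ was missed */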
static void fjes_irq_watch_task(struct work_struct *work)
{
struct fjes_adapter *adapter = container_of(to_delayed_work(work),
struct fjes_adapter, interrupt_watch_task);
local_irq_disable();
fjes_intr(adapter->hw.hw_res.irq, adapter);
local_irq_enable();
if (fjes_rxframe_search_exist(adapter, 0) >= 0)
napi_schedule(&adapter->napi);
if (adapter->interrupt_watch_enable) {
if (!delayed_work_pending(&adapter->interrupt_watch_task))
queue_delayed_work(adapter->control_wq,
&adapter->interrupt_watch_task,
FJES_IRQ_WATCH_DELAY);
}
}
/* fjes_probe - Device Initialization Routine */
static int fjes_probe(struct platform_device *plat_dev)
{
struct fjes_adapter *adapter;
struct net_device *netdev;
struct resource *res;
struct fjes_hw *hw;
u8 addr[ETH_ALEN];
int err;
err = -ENOMEM;
netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
NET_NAME_UNKNOWN, fjes_netdev_setup,
FJES_MAX_QUEUES);
if (!netdev)
goto err_out;
SET_NETDEV_DEV(netdev, &plat_dev->dev);
dev_set_drvdata(&plat_dev->dev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->plat_dev = plat_dev;
hw = &adapter->hw;
hw->back = adapter;
/* setup the private structure */
err = fjes_sw_init(adapter);
if (err)
goto err_free_netdev;
INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
adapter->force_reset = false;
adapter->open_guard = false;
adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
if (unlikely(!adapter->txrx_wq)) {
err = -ENOMEM;
goto err_free_netdev;
}
adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
WQ_MEM_RECLAIM, 0);
if (unlikely(!adapter->control_wq)) {
err = -ENOMEM;
goto err_free_txrx_wq;
}
INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
INIT_WORK(&adapter->raise_intr_rxdata_task,
fjes_raise_intr_rxdata_task);
INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
adapter->unshare_watch_bitmask = 0;
INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
adapter->interrupt_watch_enable = false;
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
if (!res) {
err = -EINVAL;
goto err_free_control_wq;
}
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
if (hw->hw_res.irq < 0) {
err = hw->hw_res.irq;
goto err_free_control_wq;
}
err = fjes_hw_init(&adapter->hw);
if (err)
goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
addr[0] = 2;
addr[1] = 0;
addr[2] = 0;
addr[3] = 0;
addr[4] = 0;
addr[5] = hw->my_epid; /* EPID */
eth_hw_addr_set(netdev, addr);
err = register_netdev(netdev);
if (err)
goto err_hw_exit;
netif_carrier_off(netdev);
fjes_dbg_adapter_init(adapter);
return 0;
err_hw_exit:
fjes_hw_exit(&adapter->hw);
err_free_control_wq:
destroy_workqueue(adapter->control_wq);
err_free_txrx_wq:
destroy_workqueue(adapter->txrx_wq);
err_free_netdev:
free_netdev(netdev);
err_out:
return err;
}
/* fjes_remove - Device Removal Routine */
static int fjes_remove(struct platform_device *plat_dev)
{
struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
fjes_dbg_adapter_exit(adapter);
cancel_delayed_work_sync(&adapter->interrupt_watch_task);
cancel_work_sync(&adapter->unshare_watch_task);
cancel_work_sync(&adapter->raise_intr_rxdata_task);
cancel_work_sync(&adapter->tx_stall_task);
if (adapter->control_wq)
destroy_workqueue(adapter->control_wq);
if (adapter->txrx_wq)
destroy_workqueue(adapter->txrx_wq);
unregister_netdev(netdev);
fjes_hw_exit(hw);
netif_napi_del(&adapter->napi);
free_netdev(netdev);
return 0;
}
static struct platform_driver fjes_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = fjes_probe,
.remove = fjes_remove,
};
static acpi_status
acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level,
void *context, void **return_value)
{
struct acpi_device *device;
bool *found = context;
device = acpi_fetch_acpi_dev(obj_handle);
if (!device)
return AE_OK;
if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID))
return AE_OK;
if (!is_extended_socket_device(device))
return AE_OK;
if (acpi_check_extended_socket_status(device))
return AE_OK;
*found = true;
return AE_CTRL_TERMINATE;
}
/* fjes_init_module - Driver Registration Routine */
static int __init fjes_init_module(void)
{
bool found = false;
int result;
acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
acpi_find_extended_socket_device, NULL, &found,
NULL);
if (!found)
return -ENODEV;
pr_info("%s - version %s - %s\n",
fjes_driver_string, fjes_driver_version, fjes_copyright);
fjes_dbg_init();
result = platform_driver_register(&fjes_driver);
if (result < 0) {
fjes_dbg_exit();
return result;
}
result = acpi_bus_register_driver(&fjes_acpi_driver);
if (result < 0)
goto fail_acpi_driver;
return 0;
fail_acpi_driver:
platform_driver_unregister(&fjes_driver);
fjes_dbg_exit();
return result;
}
module_init(fjes_init_module);
/* fjes_exit_module - Driver Exit Cleanup Routine */
static void __exit fjes_exit_module(void)
{
acpi_bus_unregister_driver(&fjes_acpi_driver);
platform_driver_unregister(&fjes_driver);
fjes_dbg_exit();
}
module_exit(fjes_exit_module);
|
linux-master
|
drivers/net/fjes/fjes_main.c
|
/*
* Network-device interface management.
*
* Copyright (c) 2004-2005, Keir Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32
/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
* increasing the inflight counter. We need to increase the inflight
* counter because the core driver calls into xenvif_zerocopy_callback,
* which calls xenvif_skb_zerocopy_complete.
*/
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb)
{
skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
atomic_inc(&queue->inflight_packets);
}
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
atomic_dec(&queue->inflight_packets);
/* Wake the dealloc thread _after_ decrementing inflight_packets so
* that if kthread_stop() has already been called, the dealloc thread
* does not wait forever with nothing to wake it.
*/
wake_up(&queue->dealloc_wq);
}
static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
!vif->disabled;
}
static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
bool rc;
rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
if (rc)
napi_schedule(&queue->napi);
return rc;
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
int old;
old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");
if (!xenvif_handle_tx_interrupt(queue)) {
atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
}
return IRQ_HANDLED;
}
static int xenvif_poll(struct napi_struct *napi, int budget)
{
struct xenvif_queue *queue =
container_of(napi, struct xenvif_queue, napi);
int work_done;
/* This vif is rogue; we pretend there is nothing to do
* for this vif to deschedule it from NAPI. But this interface
* will be turned off in thread context later.
*/
if (unlikely(queue->vif->disabled)) {
napi_complete(napi);
return 0;
}
work_done = xenvif_tx_action(queue, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
/* If the queue is rate-limited, it shall be
* rescheduled in the timer callback.
*/
if (likely(!queue->rate_limited))
xenvif_napi_schedule_or_enable_events(queue);
}
return work_done;
}
static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
bool rc;
rc = xenvif_have_rx_work(queue, false);
if (rc)
xenvif_kick_thread(queue);
return rc;
}
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
int old;
old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");
if (!xenvif_handle_rx_interrupt(queue)) {
atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
}
return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
struct xenvif_queue *queue = dev_id;
int old;
bool has_rx, has_tx;
old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
WARN(old, "Interrupt while EOI pending\n");
has_tx = xenvif_handle_tx_interrupt(queue);
has_rx = xenvif_handle_rx_interrupt(queue);
if (!has_rx && !has_tx) {
atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
}
return IRQ_HANDLED;
}
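/* Select the TX queue, using the frontend-configured hash mapping when one is set up. */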
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
unsigned int num_queues;
/* If queues are not set up internally - always return 0
* as the packet is going to be dropped anyway */
num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return netdev_pick_tx(dev, skb, NULL) %
dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
if (size == 0)
return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
return vif->hash.mapping[vif->hash.mapping_sel]
[skb_get_hash_raw(skb) % size];
}
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
/* Drop the packet if queues are not set up.
* This handler should be called inside an RCU read section
* so we don't need to enter it here explicitly.
*/
num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;
/* Obtain the queue to be used to transmit this packet */
index = skb_get_queue_mapping(skb);
if (index >= num_queues) {
pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
index, vif->dev->name);
index %= num_queues;
}
queue = &vif->queues[index];
/* Drop the packet if queue is not ready */
if (queue->task == NULL ||
queue->dealloc_task == NULL ||
!xenvif_schedulable(vif))
goto drop;
if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
struct ethhdr *eth = (struct ethhdr *)skb->data;
if (!xenvif_mcast_match(vif, eth->h_dest))
goto drop;
}
cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + vif->drain_timeout;
/* If there is no hash algorithm configured then make sure there
* is no hash information in the socket buffer, otherwise it
* would be incorrectly forwarded to the frontend.
*/
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
skb_clear_hash(skb);
if (!xenvif_rx_queue_tail(queue, skb))
goto drop;
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;
rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);
/* Aggregate tx and rx stats from each queue */
for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
tx_bytes += queue->stats.tx_bytes;
tx_packets += queue->stats.tx_packets;
}
rcu_read_unlock();
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;
vif->dev->stats.tx_packets = tx_packets;
return &vif->dev->stats;
}
static void xenvif_up(struct xenvif *vif)
{
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
napi_enable(&queue->napi);
enable_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
enable_irq(queue->rx_irq);
xenvif_napi_schedule_or_enable_events(queue);
}
}
static void xenvif_down(struct xenvif *vif)
{
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
disable_irq(queue->tx_irq);
if (queue->tx_irq != queue->rx_irq)
disable_irq(queue->rx_irq);
napi_disable(&queue->napi);
del_timer_sync(&queue->credit_timeout);
}
}
static int xenvif_open(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
xenvif_up(vif);
netif_tx_start_all_queues(dev);
return 0;
}
static int xenvif_close(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
xenvif_down(vif);
netif_tx_stop_all_queues(dev);
return 0;
}
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
struct xenvif *vif = netdev_priv(dev);
int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;
if (mtu > max)
return -EINVAL;
dev->mtu = mtu;
return 0;
}
static netdev_features_t xenvif_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
if (!vif->can_sg)
features &= ~NETIF_F_SG;
if (~(vif->gso_mask) & GSO_BIT(TCPV4))
features &= ~NETIF_F_TSO;
if (~(vif->gso_mask) & GSO_BIT(TCPV6))
features &= ~NETIF_F_TSO6;
if (!vif->ip_csum)
features &= ~NETIF_F_IP_CSUM;
if (!vif->ipv6_csum)
features &= ~NETIF_F_IPV6_CSUM;
return features;
}
static const struct xenvif_stat {
char name[ETH_GSTRING_LEN];
u16 offset;
} xenvif_stats[] = {
{
"rx_gso_checksum_fixup",
offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
},
/* If (sent != success + fail), there are probably packets never
* freed up properly!
*/
{
"tx_zerocopy_sent",
offsetof(struct xenvif_stats, tx_zerocopy_sent),
},
{
"tx_zerocopy_success",
offsetof(struct xenvif_stats, tx_zerocopy_success),
},
{
"tx_zerocopy_fail",
offsetof(struct xenvif_stats, tx_zerocopy_fail)
},
/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
* a guest with the same MAX_SKB_FRAGS.
*/
{
"tx_frag_overflow",
offsetof(struct xenvif_stats, tx_frag_overflow)
},
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
switch (string_set) {
case ETH_SS_STATS:
return ARRAY_SIZE(xenvif_stats);
default:
return -EINVAL;
}
}
static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int num_queues;
int i;
unsigned int queue_index;
rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
void *vif_stats = &vif->queues[queue_index].stats;
accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
}
data[i] = accum;
}
rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
xenvif_stats[i].name, ETH_GSTRING_LEN);
break;
}
}
static const struct ethtool_ops xenvif_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
.get_ethtool_stats = xenvif_get_ethtool_stats,
.get_strings = xenvif_get_strings,
};
static const struct net_device_ops xenvif_netdev_ops = {
.ndo_select_queue = xenvif_select_queue,
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
.ndo_stop = xenvif_close,
.ndo_change_mtu = xenvif_change_mtu,
.ndo_fix_features = xenvif_fix_features,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
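/* Allocate and register the backend net device for the given domid/handle pair. */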
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
unsigned int handle)
{
static const u8 dummy_addr[ETH_ALEN] = {
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
};
int err;
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
/* Allocate a netdev with the max. supported number of queues.
* When the guest selects the desired number, it will be updated
* via netif_set_real_num_*_queues().
*/
dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
ether_setup, xenvif_max_queues);
if (dev == NULL) {
pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
}
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
vif->ip_csum = 1;
vif->dev = dev;
vif->disabled = false;
vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
/* Start out with no queues. */
vif->queues = NULL;
vif->num_queues = 0;
vif->xdp_headroom = 0;
spin_lock_init(&vif->lock);
INIT_LIST_HEAD(&vif->fe_mcast_addr);
dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
dev->features = dev->hw_features | NETIF_F_RXCSUM;
dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
eth_hw_addr_set(dev, dummy_addr);
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
netdev_warn(dev, "Could not register device: err=%d\n", err);
free_netdev(dev);
return ERR_PTR(err);
}
netdev_dbg(dev, "Successfully created xenvif\n");
__module_get(THIS_MODULE);
return vif;
}
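/* Initialise per-queue state: credit scheduler, SKB queues, pending ring and grant pages. */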
int xenvif_init_queue(struct xenvif_queue *queue)
{
int err, i;
queue->credit_bytes = queue->remaining_credit = ~0UL;
queue->credit_usec = 0UL;
timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
queue->credit_window_start = get_jiffies_64();
queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
skb_queue_head_init(&queue->rx_queue);
skb_queue_head_init(&queue->tx_queue);
queue->pending_cons = 0;
queue->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; ++i)
queue->pending_ring[i] = i;
spin_lock_init(&queue->callback_lock);
spin_lock_init(&queue->response_lock);
/* If ballooning is disabled, this will consume real memory, so you'd
* better enable it. The long-term solution would be to use just a
* bunch of valid page descriptors, without dependency on ballooning.
*/
err = gnttab_alloc_pages(MAX_PENDING_REQS,
queue->mmap_pages);
if (err) {
netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
return -ENOMEM;
}
for (i = 0; i < MAX_PENDING_REQS; i++) {
queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
{ { .callback = xenvif_zerocopy_callback },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
}
return 0;
}
void xenvif_carrier_on(struct xenvif *vif)
{
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
netdev_update_features(vif->dev);
set_bit(VIF_STATUS_CONNECTED, &vif->status);
if (netif_running(vif->dev))
xenvif_up(vif);
rtnl_unlock();
}
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
unsigned int evtchn)
{
struct net_device *dev = vif->dev;
struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
void *addr;
struct xen_netif_ctrl_sring *shared;
RING_IDX rsp_prod, req_prod;
int err;
err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
if (err)
goto err;
shared = (struct xen_netif_ctrl_sring *)addr;
rsp_prod = READ_ONCE(shared->rsp_prod);
req_prod = READ_ONCE(shared->req_prod);
BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
err = -EIO;
if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
goto err_unmap;
err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
if (err < 0)
goto err_unmap;
vif->ctrl_irq = err;
xenvif_init_hash(vif);
err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
IRQF_ONESHOT, "xen-netback-ctrl", vif);
if (err) {
pr_warn("Could not setup irq handler for %s\n", dev->name);
goto err_deinit;
}
return 0;
err_deinit:
xenvif_deinit_hash(vif);
unbind_from_irqhandler(vif->ctrl_irq, vif);
vif->ctrl_irq = 0;
err_unmap:
xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
vif->ctrl.sring = NULL;
err:
return err;
}
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
if (queue->task) {
kthread_stop(queue->task);
put_task_struct(queue->task);
queue->task = NULL;
}
if (queue->dealloc_task) {
kthread_stop(queue->dealloc_task);
queue->dealloc_task = NULL;
}
if (queue->napi.poll) {
netif_napi_del(&queue->napi);
queue->napi.poll = NULL;
}
if (queue->tx_irq) {
unbind_from_irqhandler(queue->tx_irq, queue);
if (queue->tx_irq == queue->rx_irq)
queue->rx_irq = 0;
queue->tx_irq = 0;
}
if (queue->rx_irq) {
unbind_from_irqhandler(queue->rx_irq, queue);
queue->rx_irq = 0;
}
xenvif_unmap_frontend_data_rings(queue);
}
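/* Map the frontend TX/RX rings, start the guest-rx and dealloc kthreads
 * and bind the event channel(s).
 */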
int xenvif_connect_data(struct xenvif_queue *queue,
unsigned long tx_ring_ref,
unsigned long rx_ring_ref,
unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
struct task_struct *task;
int err;
BUG_ON(queue->tx_irq);
BUG_ON(queue->task);
BUG_ON(queue->dealloc_task);
err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
rx_ring_ref);
if (err < 0)
goto err;
init_waitqueue_head(&queue->wq);
init_waitqueue_head(&queue->dealloc_wq);
atomic_set(&queue->inflight_packets, 0);
netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);
queue->stalled = true;
task = kthread_run(xenvif_kthread_guest_rx, queue,
"%s-guest-rx", queue->name);
if (IS_ERR(task))
goto kthread_err;
queue->task = task;
/*
* Take a reference to the task in order to prevent it from being freed
* if the thread function returns before kthread_stop is called.
*/
get_task_struct(task);
task = kthread_run(xenvif_dealloc_kthread, queue,
"%s-dealloc", queue->name);
if (IS_ERR(task))
goto kthread_err;
queue->dealloc_task = task;
if (tx_evtchn == rx_evtchn) {
/* feature-split-event-channels == 0 */
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
dev, tx_evtchn, xenvif_interrupt, 0,
queue->name, queue);
if (err < 0)
goto err;
queue->tx_irq = queue->rx_irq = err;
disable_irq(queue->tx_irq);
} else {
/* feature-split-event-channels == 1 */
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
dev, tx_evtchn, xenvif_tx_interrupt, 0,
queue->tx_irq_name, queue);
if (err < 0)
goto err;
queue->tx_irq = err;
disable_irq(queue->tx_irq);
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
dev, rx_evtchn, xenvif_rx_interrupt, 0,
queue->rx_irq_name, queue);
if (err < 0)
goto err;
queue->rx_irq = err;
disable_irq(queue->rx_irq);
}
return 0;
kthread_err:
pr_warn("Could not allocate kthread for %s\n", queue->name);
err = PTR_ERR(task);
err:
xenvif_disconnect_queue(queue);
return err;
}
void xenvif_carrier_off(struct xenvif *vif)
{
struct net_device *dev = vif->dev;
rtnl_lock();
if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
netif_carrier_off(dev); /* discard queued packets */
if (netif_running(dev))
xenvif_down(vif);
}
rtnl_unlock();
}
void xenvif_disconnect_data(struct xenvif *vif)
{
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
xenvif_carrier_off(vif);
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
queue = &vif->queues[queue_index];
xenvif_disconnect_queue(queue);
}
xenvif_mcast_addr_list_free(vif);
}
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
if (vif->ctrl_irq) {
xenvif_deinit_hash(vif);
unbind_from_irqhandler(vif->ctrl_irq, vif);
vif->ctrl_irq = 0;
}
if (vif->ctrl.sring) {
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->ctrl.sring);
vif->ctrl.sring = NULL;
}
}
/* Reverse the relevant parts of xenvif_init_queue().
* Used for queue teardown from xenvif_free(), and on the
* error handling paths in xenbus.c:connect().
*/
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}
void xenvif_free(struct xenvif *vif)
{
struct xenvif_queue *queues = vif->queues;
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
unregister_netdev(vif->dev);
free_netdev(vif->dev);
for (queue_index = 0; queue_index < num_queues; ++queue_index)
xenvif_deinit_queue(&queues[queue_index]);
vfree(queues);
module_put(THIS_MODULE);
}
|
linux-master
|
drivers/net/xen-netback/interface.c
|
/*
* Copyright (c) 2016 Citrix Systems Inc.
* Copyright (c) 2002-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/kthread.h>
#include <xen/xen.h>
#include <xen/events.h>
/*
* Update the needed ring page slots for the first SKB queued.
* Note that any call sequence outside the RX thread calling this function
* needs to wake up the RX thread via a call of xenvif_kick_thread()
* afterwards in order to avoid a race with putting the thread to sleep.
*/
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
const struct sk_buff *skb)
{
unsigned int needed = 0;
if (skb) {
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
needed++;
if (skb->sw_hash)
needed++;
}
WRITE_ONCE(queue->rx_slots_needed, needed);
}
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
unsigned int needed;
needed = READ_ONCE(queue->rx_slots_needed);
if (!needed)
return false;
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
if (prod - cons >= needed)
return true;
queue->rx.sring->req_event = prod + 1;
/* Make sure event is visible before we check prod
* again.
*/
mb();
} while (queue->rx.sring->req_prod != prod);
return false;
}
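/* Queue an SKB for guest RX; stop the corresponding TX queue if the internal RX queue is full. */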
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned long flags;
bool ret = true;
spin_lock_irqsave(&queue->rx_queue.lock, flags);
if (queue->rx_queue_len >= queue->rx_queue_max) {
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
ret = false;
} else {
if (skb_queue_empty(&queue->rx_queue))
xenvif_update_needed_slots(queue, skb);
__skb_queue_tail(&queue->rx_queue, skb);
queue->rx_queue_len += skb->len;
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return ret;
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
struct sk_buff *skb;
spin_lock_irq(&queue->rx_queue.lock);
skb = __skb_dequeue(&queue->rx_queue);
if (skb) {
xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
queue->rx_queue_len -= skb->len;
if (queue->rx_queue_len < queue->rx_queue_max) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
netif_tx_wake_queue(txq);
}
}
spin_unlock_irq(&queue->rx_queue.lock);
return skb;
}
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
struct sk_buff *skb;
while ((skb = xenvif_rx_dequeue(queue)) != NULL)
kfree_skb(skb);
}
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
struct sk_buff *skb;
for (;;) {
skb = skb_peek(&queue->rx_queue);
if (!skb)
break;
if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
break;
xenvif_rx_dequeue(queue);
kfree_skb(skb);
queue->vif->dev->stats.rx_dropped++;
}
}
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
unsigned int i;
int notify;
gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);
for (i = 0; i < queue->rx_copy.num; i++) {
struct gnttab_copy *op;
op = &queue->rx_copy.op[i];
/* If the copy failed, overwrite the status field in
* the corresponding response.
*/
if (unlikely(op->status != GNTST_okay)) {
struct xen_netif_rx_response *rsp;
rsp = RING_GET_RESPONSE(&queue->rx,
queue->rx_copy.idx[i]);
rsp->status = op->status;
}
}
queue->rx_copy.num = 0;
/* Push responses for all completed packets. */
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);
__skb_queue_purge(queue->rx_copy.completed);
}
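/* Add one grant-copy operation for a chunk of packet data, flushing the batch when it is full. */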
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
struct xen_netif_rx_request *req,
unsigned int offset, void *data, size_t len)
{
struct gnttab_copy *op;
struct page *page;
struct xen_page_foreign *foreign;
if (queue->rx_copy.num == COPY_BATCH_SIZE)
xenvif_rx_copy_flush(queue);
op = &queue->rx_copy.op[queue->rx_copy.num];
page = virt_to_page(data);
op->flags = GNTCOPY_dest_gref;
foreign = xen_page_foreign(page);
if (foreign) {
op->source.domid = foreign->domid;
op->source.u.ref = foreign->gref;
op->flags |= GNTCOPY_source_gref;
} else {
op->source.u.gmfn = virt_to_gfn(data);
op->source.domid = DOMID_SELF;
}
op->source.offset = xen_offset_in_page(data);
op->dest.u.ref = req->gref;
op->dest.domid = queue->vif->domid;
op->dest.offset = offset;
op->len = len;
queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
queue->rx_copy.num++;
}
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
if (skb_is_gso(skb)) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
return XEN_NETIF_GSO_TYPE_TCPV4;
else
return XEN_NETIF_GSO_TYPE_TCPV6;
}
return XEN_NETIF_GSO_TYPE_NONE;
}
struct xenvif_pkt_state {
struct sk_buff *skb;
size_t remaining_len;
struct sk_buff *frag_iter;
int frag; /* frag == -1 => frag_iter->head */
unsigned int frag_offset;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
unsigned int extra_count;
unsigned int slot;
};
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
struct xenvif_pkt_state *pkt)
{
struct sk_buff *skb;
unsigned int gso_type;
skb = xenvif_rx_dequeue(queue);
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
/* Reset packet state. */
memset(pkt, 0, sizeof(struct xenvif_pkt_state));
pkt->skb = skb;
pkt->frag_iter = skb;
pkt->remaining_len = skb->len;
pkt->frag = -1;
gso_type = xenvif_gso_type(skb);
if ((1 << gso_type) & queue->vif->gso_mask) {
struct xen_netif_extra_info *extra;
extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
extra->u.gso.type = gso_type;
extra->u.gso.size = skb_shinfo(skb)->gso_size;
extra->u.gso.pad = 0;
extra->u.gso.features = 0;
extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
extra->flags = 0;
pkt->extra_count++;
}
if (queue->vif->xdp_headroom) {
struct xen_netif_extra_info *extra;
extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
memset(extra, 0, sizeof(struct xen_netif_extra_info));
extra->u.xdp.headroom = queue->vif->xdp_headroom;
extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
extra->flags = 0;
pkt->extra_count++;
}
if (skb->sw_hash) {
struct xen_netif_extra_info *extra;
extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
extra->u.hash.algorithm =
XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
if (skb->l4_hash)
extra->u.hash.type =
skb->protocol == htons(ETH_P_IP) ?
_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
else
extra->u.hash.type =
skb->protocol == htons(ETH_P_IP) ?
_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
_XEN_NETIF_CTRL_HASH_TYPE_IPV6;
*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);
extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
extra->flags = 0;
pkt->extra_count++;
}
}
static void xenvif_rx_complete(struct xenvif_queue *queue,
struct xenvif_pkt_state *pkt)
{
/* All responses are ready to be pushed. */
queue->rx.rsp_prod_pvt = queue->rx.req_cons;
__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}
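/* Advance the packet state to the next fragment. frag == -1 denotes the
 * linear (head) area; once the last page fragment of the current skb has
 * been consumed, iteration moves on to its frag_list (or to the next skb
 * on that list).
 */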
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
struct sk_buff *frag_iter = pkt->frag_iter;
unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;
pkt->frag++;
pkt->frag_offset = 0;
if (pkt->frag >= nr_frags) {
if (frag_iter == pkt->skb)
pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
else
pkt->frag_iter = frag_iter->next;
pkt->frag = -1;
}
}
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
struct xenvif_pkt_state *pkt,
unsigned int offset, void **data,
size_t *len)
{
struct sk_buff *frag_iter = pkt->frag_iter;
void *frag_data;
size_t frag_len, chunk_len;
BUG_ON(!frag_iter);
if (pkt->frag == -1) {
frag_data = frag_iter->data;
frag_len = skb_headlen(frag_iter);
} else {
skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];
frag_data = skb_frag_address(frag);
frag_len = skb_frag_size(frag);
}
frag_data += pkt->frag_offset;
frag_len -= pkt->frag_offset;
chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
xen_offset_in_page(frag_data));
pkt->frag_offset += chunk_len;
/* Advance to next frag? */
if (frag_len == chunk_len)
xenvif_rx_next_frag(pkt);
*data = frag_data;
*len = chunk_len;
}
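/* Fill a single rx ring slot, copying packet data chunk by chunk until the
 * XEN_PAGE_SIZE slot (including any XDP headroom) is full or the packet is
 * exhausted. Checksum and extra-info flags are only reported on the first
 * slot of a packet.
 */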
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
struct xenvif_pkt_state *pkt,
struct xen_netif_rx_request *req,
struct xen_netif_rx_response *rsp)
{
unsigned int offset = queue->vif->xdp_headroom;
unsigned int flags;
do {
size_t len;
void *data;
xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
xenvif_rx_copy_add(queue, req, offset, data, len);
offset += len;
pkt->remaining_len -= len;
} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);
if (pkt->remaining_len > 0)
flags = XEN_NETRXF_more_data;
else
flags = 0;
if (pkt->slot == 0) {
struct sk_buff *skb = pkt->skb;
if (skb->ip_summed == CHECKSUM_PARTIAL)
flags |= XEN_NETRXF_csum_blank |
XEN_NETRXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
flags |= XEN_NETRXF_data_validated;
if (pkt->extra_count != 0)
flags |= XEN_NETRXF_extra_info;
}
rsp->offset = 0;
rsp->flags = flags;
rsp->id = req->id;
rsp->status = (s16)offset;
}
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
struct xenvif_pkt_state *pkt,
struct xen_netif_rx_request *req,
struct xen_netif_rx_response *rsp)
{
struct xen_netif_extra_info *extra = (void *)rsp;
unsigned int i;
pkt->extra_count--;
for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
if (pkt->extras[i].type) {
*extra = pkt->extras[i];
if (pkt->extra_count != 0)
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
pkt->extras[i].type = 0;
return;
}
}
BUG();
}
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
struct xenvif_pkt_state pkt;
xenvif_rx_next_skb(queue, &pkt);
queue->last_rx_time = jiffies;
do {
struct xen_netif_rx_request *req;
struct xen_netif_rx_response *rsp;
req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);
/* Extras must go after the first data slot */
if (pkt.slot != 0 && pkt.extra_count != 0)
xenvif_rx_extra_slot(queue, &pkt, req, rsp);
else
xenvif_rx_data_slot(queue, &pkt, req, rsp);
queue->rx.req_cons++;
pkt.slot++;
} while (pkt.remaining_len > 0 || pkt.extra_count != 0);
xenvif_rx_complete(queue, &pkt);
}
#define RX_BATCH_SIZE 64
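/* Process up to RX_BATCH_SIZE queued skbs per invocation, as long as the
 * shared ring has enough free request slots, then flush any outstanding
 * grant copies and push the responses to the frontend.
 */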
static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;
__skb_queue_head_init(&completed_skbs);
queue->rx_copy.completed = &completed_skbs;
while (xenvif_rx_ring_slots_available(queue) &&
!skb_queue_empty(&queue->rx_queue) &&
work_done < RX_BATCH_SIZE) {
xenvif_rx_skb(queue);
work_done++;
}
/* Flush any pending copies and complete all skbs. */
xenvif_rx_copy_flush(queue);
}
static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
RING_IDX prod, cons;
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
return prod - cons;
}
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
unsigned int needed = READ_ONCE(queue->rx_slots_needed);
return !queue->stalled &&
xenvif_rx_queue_slots(queue) < needed &&
time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
unsigned int needed = READ_ONCE(queue->rx_slots_needed);
return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
return xenvif_rx_ring_slots_available(queue) ||
(queue->vif->stall_timeout &&
(xenvif_rx_queue_stalled(queue) ||
xenvif_rx_queue_ready(queue))) ||
(test_kthread && kthread_should_stop()) ||
queue->vif->disabled;
}
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
struct sk_buff *skb;
long timeout;
skb = skb_peek(&queue->rx_queue);
if (!skb)
return MAX_SCHEDULE_TIMEOUT;
timeout = XENVIF_RX_CB(skb)->expires - jiffies;
return timeout < 0 ? 0 : timeout;
}
/* Wait until the guest Rx thread has work.
*
* The timeout needs to be adjusted based on the current head of the
* queue (and not just the head at the beginning). In particular, if
* the queue is initially empty, an infinite timeout is used and this
* needs to be reduced when a skb is queued.
*
* This cannot be done with wait_event_timeout() because it only
* calculates the timeout once.
*/
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
DEFINE_WAIT(wait);
if (xenvif_have_rx_work(queue, true))
return;
for (;;) {
long ret;
prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
if (xenvif_have_rx_work(queue, true))
break;
if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
&queue->eoi_pending) &
(NETBK_RX_EOI | NETBK_COMMON_EOI))
xen_irq_lateeoi(queue->rx_irq, 0);
ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
if (!ret)
break;
}
finish_wait(&queue->wq, &wait);
}
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
struct xenvif *vif = queue->vif;
queue->stalled = true;
/* At least one queue has stalled? Disable the carrier. */
spin_lock(&vif->lock);
if (vif->stalled_queues++ == 0) {
netdev_info(vif->dev, "Guest Rx stalled");
netif_carrier_off(vif->dev);
}
spin_unlock(&vif->lock);
}
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
struct xenvif *vif = queue->vif;
queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
queue->stalled = false;
/* All queues are ready? Enable the carrier. */
spin_lock(&vif->lock);
if (--vif->stalled_queues == 0) {
netdev_info(vif->dev, "Guest Rx ready");
netif_carrier_on(vif->dev);
}
spin_unlock(&vif->lock);
}
int xenvif_kthread_guest_rx(void *data)
{
struct xenvif_queue *queue = data;
struct xenvif *vif = queue->vif;
if (!vif->stall_timeout)
xenvif_queue_carrier_on(queue);
for (;;) {
xenvif_wait_for_rx_work(queue);
if (kthread_should_stop())
break;
/* This frontend has been found to be rogue; disable it in
* kthread context. Currently this is only set when netback
* finds that the frontend has sent a malformed packet, but we
* cannot disable the interface in softirq context, so we defer
* it here if this thread is associated with queue 0.
*/
if (unlikely(vif->disabled && queue->id == 0)) {
xenvif_carrier_off(vif);
break;
}
if (!skb_queue_empty(&queue->rx_queue))
xenvif_rx_action(queue);
/* If the guest hasn't provided any Rx slots for a while,
* it's probably not responsive; drop the carrier so packets
* are dropped earlier.
*/
if (vif->stall_timeout) {
if (xenvif_rx_queue_stalled(queue))
xenvif_queue_carrier_off(queue);
else if (xenvif_rx_queue_ready(queue))
xenvif_queue_carrier_on(queue);
}
/* Queued packets may have foreign pages from other
* domains. These cannot be queued indefinitely as
* this would starve guests of grant refs and transmit
* slots.
*/
xenvif_rx_queue_drop_expired(queue);
cond_resched();
}
/* Bin any remaining skbs */
xenvif_rx_queue_purge(queue);
return 0;
}
|
linux-master
|
drivers/net/xen-netback/rx.c
|
/*
* Back-end of the driver for virtual network devices. This portion of the
* driver exports a 'unified' network-device interface that can be accessed
* by any operating system that implements a compatible front end. A
* reference front-end implementation can be found in:
* drivers/net/xen-netfront.c
*
* Copyright (c) 2002-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>
#include <net/tcp.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time, as
* event channels are a limited resource. Split event channels are
* enabled by default.
*/
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
* before they are dropped.
*/
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
/* The length of time before the frontend is considered unresponsive
* because it isn't providing Rx slots.
*/
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
/*
* This is the maximum number of slots a skb can have. If a guest sends
* a skb which exceeds this limit, it is considered malicious.
*/
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
/* The amount to copy out of the first guest Tx slot into the skb's
* linear area. If the first slot has more data, it will be mapped
* and put into the first frag.
*
* This is sized to avoid pulling headers from the frags for most
* TCP/IP packets.
*/
#define XEN_NETBACK_TX_COPY_LEN 128
/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
/* This module parameter indicates whether data for xen-netfront is
* placed with the XDP_PACKET_HEADROOM offset needed for XDP
* processing.
*/
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
unsigned int extra_count,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
u16 idx)
{
return page_to_pfn(queue->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
(vif->pending_tx_info[pending_idx].callback_struct)
/* Find the containing queue's structure from a pointer into its
* pending_tx_info array.
*/
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
u16 pending_idx = ubuf->desc;
struct pending_tx_info *temp =
container_of(ubuf, struct pending_tx_info, callback_struct);
return container_of(temp - pending_idx,
struct xenvif_queue,
pending_tx_info[0]);
}
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
return (u16)skb_frag_off(frag);
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
skb_frag_off_set(frag, pending_idx);
}
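/* Reduce a free-running ring counter to an index into the pending ring.
 * This relies on MAX_PENDING_REQS being a power of two so a mask can be
 * used instead of a modulo.
 */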
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
}
void xenvif_kick_thread(struct xenvif_queue *queue)
{
wake_up(&queue->wq);
}
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
int more_to_do;
RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
if (more_to_do)
napi_schedule(&queue->napi);
else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
&queue->eoi_pending) &
(NETBK_TX_EOI | NETBK_COMMON_EOI))
xen_irq_lateeoi(queue->tx_irq, 0);
}
static void tx_add_credit(struct xenvif_queue *queue)
{
unsigned long max_burst, max_credit;
/*
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
max_burst = max(131072UL, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = queue->remaining_credit + queue->credit_bytes;
if (max_credit < queue->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
queue->remaining_credit = min(max_credit, max_burst);
queue->rate_limited = false;
}
void xenvif_tx_credit_callback(struct timer_list *t)
{
struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
tx_add_credit(queue);
xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
unsigned int extra_count, RING_IDX end)
{
RING_IDX cons = queue->tx.req_cons;
unsigned long flags;
do {
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
RING_COPY_REQUEST(&queue->tx, cons++, txp);
extra_count = 0; /* only the first frag can have extras */
} while (1);
queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
/* Disable the vif from queue 0's kthread */
if (vif->num_queues)
xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
struct xen_netif_tx_request *first,
unsigned int extra_count,
struct xen_netif_tx_request *txp,
int work_to_do)
{
RING_IDX cons = queue->tx.req_cons;
int slots = 0;
int drop_err = 0;
int more_data;
if (!(first->flags & XEN_NETTXF_more_data))
return 0;
do {
struct xen_netif_tx_request dropped_tx = { 0 };
if (slots >= work_to_do) {
netdev_err(queue->vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
xenvif_fatal_tx_err(queue->vif);
return -ENODATA;
}
/* This guest is really using too many slots and is
* considered malicious.
*/
if (unlikely(slots >= fatal_skb_slots)) {
netdev_err(queue->vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
xenvif_fatal_tx_err(queue->vif);
return -E2BIG;
}
/* The Xen network protocol had an implicit dependency on
* MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to the
* historical MAX_SKB_FRAGS value of 18 to honor the same
* behavior as before. Any packet using more than 18 slots
* but fewer than fatal_skb_slots slots is dropped.
*/
if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Too many slots (%d) exceeding limit (%d), dropping packet\n",
slots, XEN_NETBK_LEGACY_SLOTS_MAX);
drop_err = -E2BIG;
}
if (drop_err)
txp = &dropped_tx;
RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
/* If the guest submitted a frame >= 64 KiB then
* first->size overflowed and following slots will
* appear to be larger than the frame.
*
* This cannot be a fatal error as there are buggy
* frontends that do this.
*
* Consume all slots and drop the packet.
*/
if (!drop_err && txp->size > first->size) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Invalid tx request, slot size %u > remaining size %u\n",
txp->size, first->size);
drop_err = -EIO;
}
first->size -= txp->size;
slots++;
if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
txp->offset, txp->size);
xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
more_data = txp->flags & XEN_NETTXF_more_data;
if (!drop_err)
txp++;
} while (more_data);
if (drop_err) {
xenvif_tx_err(queue, first, extra_count, cons + slots);
return drop_err;
}
return slots;
}
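/* Per-skb control block used while building tx grant operations:
 * copy_pending_idx[] and copy_count track the pending-ring slots consumed
 * by the grant copies for the linear area, and split_mask marks copies
 * that were split across a local page boundary so both halves are checked
 * as one unit in xenvif_tx_check_gop().
 */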
struct xenvif_tx_cb {
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
u32 split_mask;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
struct xen_netif_tx_request *txp,
unsigned int extra_count,
struct gnttab_map_grant_ref *mop)
{
queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map | GNTMAP_readonly,
txp->gref, queue->vif->domid);
memcpy(&queue->pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
queue->pending_tx_info[pending_idx].extra_count = extra_count;
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
struct sk_buff *skb =
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
if (unlikely(skb == NULL))
return NULL;
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
/* Initialize it here to avoid later surprises */
skb_shinfo(skb)->destructor_arg = NULL;
return skb;
}
static void xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txfrags,
unsigned *copy_ops,
unsigned *map_ops,
unsigned int frag_overflow,
struct sk_buff *nskb,
unsigned int extra_count,
unsigned int data_len)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx;
pending_ring_idx_t index;
unsigned int nr_slots;
struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
struct xen_netif_tx_request *txp = first;
nr_slots = shinfo->nr_frags + frag_overflow + 1;
copy_count(skb) = 0;
XENVIF_TX_CB(skb)->split_mask = 0;
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
while (data_len > 0) {
int amount = data_len > txp->size ? txp->size : data_len;
bool split = false;
cop->source.u.ref = txp->gref;
cop->source.domid = queue->vif->domid;
cop->source.offset = txp->offset;
cop->dest.domid = DOMID_SELF;
cop->dest.offset = (offset_in_page(skb->data +
skb_headlen(skb) -
data_len)) & ~XEN_PAGE_MASK;
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
- data_len);
/* Don't cross local page boundary! */
if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
amount = XEN_PAGE_SIZE - cop->dest.offset;
XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
split = true;
}
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
callback_param(queue, pending_idx).ctx = NULL;
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
if (!split)
copy_count(skb)++;
cop++;
data_len -= amount;
if (amount == txp->size) {
/* The copy op covered the full tx_request */
memcpy(&queue->pending_tx_info[pending_idx].req,
txp, sizeof(*txp));
queue->pending_tx_info[pending_idx].extra_count =
(txp == first) ? extra_count : 0;
if (txp == first)
txp = txfrags;
else
txp++;
queue->pending_cons++;
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
* The remainder will be mapped or copied in the next
* iteration.
*/
txp->offset += amount;
txp->size -= amount;
}
}
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
shinfo->nr_frags++, gop++, nr_slots--) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
if (txp == first)
txp = txfrags;
else
txp++;
}
if (nr_slots > 0) {
shinfo = skb_shinfo(nskb);
frags = shinfo->frags;
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
gop);
frag_set_pending_idx(&frags[shinfo->nr_frags],
pending_idx);
}
skb_shinfo(skb)->frag_list = nskb;
} else if (nskb) {
/* A frag_list skb was allocated but it is no longer needed
* because enough slots were converted to copy ops above.
*/
kfree_skb(nskb);
}
(*copy_ops) = cop - queue->tx_copy_ops;
(*map_ops) = gop - queue->tx_map_ops;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
u16 pending_idx,
grant_handle_t handle)
{
if (unlikely(queue->grant_tx_handle[pending_idx] !=
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
"Trying to overwrite active handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
queue->grant_tx_handle[pending_idx] = handle;
}
static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
u16 pending_idx)
{
if (unlikely(queue->grant_tx_handle[pending_idx] ==
NETBACK_INVALID_HANDLE)) {
netdev_err(queue->vif->dev,
"Trying to unmap invalid handle! pending_idx: 0x%x\n",
pending_idx);
BUG();
}
queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct sk_buff *skb,
struct gnttab_map_grant_ref **gopp_map,
struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx;
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
struct skb_shared_info *shinfo = skb_shinfo(skb);
/* If this is non-NULL, we are currently checking the frag_list skb, and
* this points to the shinfo of the first one
*/
struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
const bool sharedslot = nr_frags &&
frag_get_pending_idx(&shinfo->frags[0]) ==
copy_pending_idx(skb, copy_count(skb) - 1);
int i, err = 0;
for (i = 0; i < copy_count(skb); i++) {
int newerr;
/* Check status of header. */
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
/* Split copies need to be handled together. */
if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
(*gopp_copy)++;
if (!newerr)
newerr = (*gopp_copy)->status;
}
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
} else {
err = newerr;
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
}
(*gopp_copy)++;
}
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
int j, newerr;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = gop_map->status;
if (likely(!newerr)) {
xenvif_grant_handle_set(queue,
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err)) {
xenvif_idx_unmap(queue, pending_idx);
/* If the mapping of the first frag was OK, but
* the header's copy failed, and they are
* sharing a slot, send an error.
*/
if (i == 0 && !first_shinfo && sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
else
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
continue;
}
/* Error on this fragment: respond to client with an error. */
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
i,
gop_map->status,
pending_idx,
gop_map->ref);
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
/* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
/* And if we found the error while checking the frag_list, unmap
* the first skb's frags
*/
if (first_shinfo) {
for (j = 0; j < first_shinfo->nr_frags; j++) {
pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
if (skb_has_frag_list(skb) && !first_shinfo) {
first_shinfo = shinfo;
shinfo = skb_shinfo(shinfo->frag_list);
nr_frags = shinfo->nr_frags;
goto check_frags;
}
*gopp_map = gop_map;
return err;
}
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
u16 prev_pending_idx = INVALID_PENDING_IDX;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
struct page *page;
u16 pending_idx;
pending_idx = frag_get_pending_idx(frag);
/* If this is not the first frag, chain it to the previous */
if (prev_pending_idx == INVALID_PENDING_IDX)
skb_shinfo(skb)->destructor_arg =
&callback_param(queue, pending_idx);
else
callback_param(queue, prev_pending_idx).ctx =
&callback_param(queue, pending_idx);
callback_param(queue, pending_idx).ctx = NULL;
prev_pending_idx = pending_idx;
txp = &queue->pending_tx_info[pending_idx].req;
page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
/* Take an extra reference to offset the network stack's put_page */
get_page(queue->mmap_pages[pending_idx]);
}
}
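/* Consume the chain of extra-info slots that follows a tx request and
 * store each one in the extras[] array, indexed by its type. Returns the
 * remaining work_to_do, or a negative error on a malformed chain.
 */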
static int xenvif_get_extras(struct xenvif_queue *queue,
struct xen_netif_extra_info *extras,
unsigned int *extra_count,
int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = queue->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_err(queue->vif->dev, "Missing extra info\n");
xenvif_fatal_tx_err(queue->vif);
return -EBADR;
}
RING_COPY_REQUEST(&queue->tx, cons, &extra);
queue->tx.req_cons = ++cons;
(*extra_count)++;
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
netdev_err(queue->vif->dev,
"Invalid extra type: %d\n", extra.type);
xenvif_fatal_tx_err(queue->vif);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_err(vif->dev, "GSO size must not be zero.\n");
xenvif_fatal_tx_err(vif);
return -EINVAL;
}
switch (gso->u.gso.type) {
case XEN_NETIF_GSO_TYPE_TCPV4:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
break;
case XEN_NETIF_GSO_TYPE_TCPV6:
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
break;
default:
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
xenvif_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
/* gso_segs will be calculated later */
return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
bool recalculate_partial_csum = false;
/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
* frame. In this case force the SKB to CHECKSUM_PARTIAL and
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
queue->stats.rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = true;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
return skb_checksum_setup(skb, recalculate_partial_csum);
}
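/* Credit-based transmit scheduling: each queue may send up to credit_bytes
 * bytes per credit_usec-microsecond window. If the window has elapsed the
 * credit is replenished; otherwise, when a request exceeds the remaining
 * credit, the credit timer is armed and the queue is rate limited until
 * it fires.
 */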
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
u64 now = get_jiffies_64();
u64 next_credit = queue->credit_window_start +
msecs_to_jiffies(queue->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
if (timer_pending(&queue->credit_timeout)) {
queue->rate_limited = true;
return true;
}
/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
queue->credit_window_start = now;
tx_add_credit(queue);
}
/* Still too big to send right now? Set a callback. */
if (size > queue->remaining_credit) {
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
queue->rate_limited = true;
return true;
}
return false;
}
/* No locking is required in xenvif_mcast_add/del() as they are
* only ever invoked from NAPI poll. An RCU list is used because
* xenvif_mcast_match() is called asynchronously, during start_xmit.
*/
static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
struct xenvif_mcast_addr *mcast;
if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
if (net_ratelimit())
netdev_err(vif->dev,
"Too many multicast addresses\n");
return -ENOSPC;
}
mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return -ENOMEM;
ether_addr_copy(mcast->addr, addr);
list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
vif->fe_mcast_count++;
return 0;
}
static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
struct xenvif_mcast_addr *mcast;
list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
if (ether_addr_equal(addr, mcast->addr)) {
--vif->fe_mcast_count;
list_del_rcu(&mcast->entry);
kfree_rcu(mcast, rcu);
break;
}
}
}
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
struct xenvif_mcast_addr *mcast;
rcu_read_lock();
list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
if (ether_addr_equal(addr, mcast->addr)) {
rcu_read_unlock();
return true;
}
}
rcu_read_unlock();
return false;
}
void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
/* No need for locking or RCU here. NAPI poll and TX queue
* are stopped.
*/
while (!list_empty(&vif->fe_mcast_addr)) {
struct xenvif_mcast_addr *mcast;
mcast = list_first_entry(&vif->fe_mcast_addr,
struct xenvif_mcast_addr,
entry);
--vif->fe_mcast_count;
list_del(&mcast->entry);
kfree(mcast);
}
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget,
unsigned *copy_ops,
unsigned *map_ops)
{
struct sk_buff *skb, *nskb;
int ret;
unsigned int frag_overflow;
while (skb_queue_len(&queue->tx_queue) < budget) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
unsigned int extra_count;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
if (queue->tx.sring->req_prod - queue->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(queue->vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
queue->tx.sring->req_prod, queue->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
xenvif_fatal_tx_err(queue->vif);
break;
}
work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
if (!work_to_do)
break;
idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
RING_COPY_REQUEST(&queue->tx, idx, &txreq);
/* Credit-based scheduling. */
if (txreq.size > queue->remaining_credit &&
tx_credit_exceeded(queue, txreq.size))
break;
queue->remaining_credit -= txreq.size;
work_to_do--;
queue->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
extra_count = 0;
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xenvif_get_extras(queue, extras,
&extra_count,
work_to_do);
idx = queue->tx.req_cons;
if (unlikely(work_to_do < 0))
break;
}
if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
struct xen_netif_extra_info *extra;
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
make_tx_response(queue, &txreq, extra_count,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
push_tx_responses(queue);
continue;
}
if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
struct xen_netif_extra_info *extra;
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
make_tx_response(queue, &txreq, extra_count,
XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
continue;
}
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
XEN_NETBACK_TX_COPY_LEN : txreq.size;
ret = xenvif_count_requests(queue, &txreq, extra_count,
txfrags, work_to_do);
if (unlikely(ret < 0))
break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(queue->vif->dev,
"Bad packet size: %d\n", txreq.size);
xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
/* The payload mustn't cross a page boundary, as it mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
txreq.offset, txreq.size);
xenvif_fatal_tx_err(queue->vif);
break;
}
if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
data_len = txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
netdev_dbg(queue->vif->dev,
"Can't allocate a skb in start_xmit.\n");
xenvif_tx_err(queue, &txreq, extra_count, idx);
break;
}
skb_shinfo(skb)->nr_frags = ret;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
frag_overflow = 0;
nskb = NULL;
if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
BUG_ON(frag_overflow > MAX_SKB_FRAGS);
skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
netdev_err(queue->vif->dev,
"Can't allocate the frag_list skb.\n");
break;
}
}
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
kfree_skb(nskb);
break;
}
}
if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
struct xen_netif_extra_info *extra;
enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
switch (extra->u.hash.type) {
case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
type = PKT_HASH_TYPE_L3;
break;
case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
type = PKT_HASH_TYPE_L4;
break;
default:
break;
}
if (type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb,
*(u32 *)extra->u.hash.value,
type);
}
xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
map_ops, frag_overflow, nskb, extra_count,
data_len);
__skb_queue_tail(&queue->tx_queue, skb);
queue->tx.req_cons = idx;
}
return;
}
/* Consolidate an skb with a frag_list into a brand new one with local pages
* in its frags. Returns 0, or -ENOMEM if new pages can't be allocated.
*/
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned int offset = skb_headlen(skb);
skb_frag_t frags[MAX_SKB_FRAGS];
int i, f;
struct ubuf_info *uarg;
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
queue->stats.tx_zerocopy_sent += 2;
queue->stats.tx_frag_overflow++;
xenvif_fill_frags(queue, nskb);
/* Subtract frags size, we will correct it later */
skb->truesize -= skb->data_len;
skb->len += nskb->len;
skb->data_len += nskb->len;
/* create a brand new frags array and coalesce there */
for (i = 0; offset < skb->len; i++) {
struct page *page;
unsigned int len;
BUG_ON(i >= MAX_SKB_FRAGS);
page = alloc_page(GFP_ATOMIC);
if (!page) {
int j;
skb->truesize += skb->data_len;
for (j = 0; j < i; j++)
put_page(skb_frag_page(&frags[j]));
return -ENOMEM;
}
if (offset + PAGE_SIZE < skb->len)
len = PAGE_SIZE;
else
len = skb->len - offset;
if (skb_copy_bits(skb, offset, page_address(page), len))
BUG();
offset += len;
skb_frag_fill_page_desc(&frags[i], page, 0, len);
}
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
uarg->callback(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
skb_shinfo(skb)->nr_frags = i;
skb->truesize += i * PAGE_SIZE;
return 0;
}
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
struct gnttab_copy *gop_copy = queue->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
pending_idx = copy_pending_idx(skb, 0);
txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
/* If there was an error, xenvif_tx_check_gop is
* expected to release all the frags which were mapped,
* so kfree_skb shouldn't do it again.
*/
skb_shinfo(skb)->nr_frags = 0;
if (skb_has_frag_list(skb)) {
struct sk_buff *nskb =
skb_shinfo(skb)->frag_list;
skb_shinfo(nskb)->nr_frags = 0;
}
kfree_skb(skb);
continue;
}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
"Not enough memory to consolidate frag_list!\n");
xenvif_skb_zerocopy_prepare(queue, skb);
kfree_skb(skb);
continue;
}
/* Copied all the bits from the frag list -- free it. */
skb_frag_list_init(skb);
kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
skb_reset_network_header(skb);
if (checksum_setup(queue, skb)) {
netdev_dbg(queue->vif->dev,
"Can't setup checksum in net_tx_action\n");
/* We have to set this flag to trigger the callback */
if (skb_shinfo(skb)->destructor_arg)
xenvif_skb_zerocopy_prepare(queue, skb);
kfree_skb(skb);
continue;
}
skb_probe_transport_header(skb);
/* If the packet is GSO then we will have just set up the
* transport header offset in checksum_setup so it's now
* straightforward to calculate gso_segs.
*/
if (skb_is_gso(skb)) {
int mss, hdrlen;
/* GSO implies having the L4 header. */
WARN_ON_ONCE(!skb_transport_header_was_set(skb));
if (unlikely(!skb_transport_header_was_set(skb))) {
kfree_skb(skb);
continue;
}
mss = skb_shinfo(skb)->gso_size;
hdrlen = skb_tcp_all_headers(skb);
skb_shinfo(skb)->gso_segs =
DIV_ROUND_UP(skb->len - hdrlen, mss);
}
queue->stats.rx_bytes += skb->len;
queue->stats.rx_packets++;
work_done++;
/* Set this flag right before netif_receive_skb, otherwise
* someone might think this packet has already left netback and
* do a skb_copy_ubufs while we are still in control of the
* skb. E.g. the __pskb_pull_tail earlier can do such a thing.
*/
if (skb_shinfo(skb)->destructor_arg) {
xenvif_skb_zerocopy_prepare(queue, skb);
queue->stats.tx_zerocopy_sent++;
}
netif_receive_skb(skb);
}
return work_done;
}
void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
struct xenvif_queue *queue = ubuf_to_queue(ubuf);
/* This is the only place where we grab this lock, to protect callbacks
* from each other.
*/
spin_lock_irqsave(&queue->callback_lock, flags);
do {
u16 pending_idx = ubuf->desc;
ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
MAX_PENDING_REQS);
index = pending_index(queue->dealloc_prod);
queue->dealloc_ring[index] = pending_idx;
/* Sync with xenvif_tx_dealloc_action:
* insert idx then incr producer.
*/
smp_wmb();
queue->dealloc_prod++;
} while (ubuf);
spin_unlock_irqrestore(&queue->callback_lock, flags);
if (likely(zerocopy_success))
queue->stats.tx_zerocopy_success++;
else
queue->stats.tx_zerocopy_fail++;
xenvif_skb_zerocopy_complete(queue);
}
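/* Unmap grants whose zerocopy callbacks have completed. The dealloc ring
 * is a producer/consumer pair: dealloc_prod is advanced in
 * xenvif_zerocopy_callback() (under callback_lock) and dealloc_cons here,
 * with smp_wmb()/smp_rmb() ordering the index updates against the ring
 * contents.
 */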
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
pending_ring_idx_t dc, dp;
u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
unsigned int i = 0;
dc = queue->dealloc_cons;
gop = queue->tx_unmap_ops;
/* Free up any grants we have finished using */
do {
dp = queue->dealloc_prod;
/* Ensure we see all indices enqueued by all
* xenvif_zerocopy_callback().
*/
smp_rmb();
while (dc != dp) {
BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
pending_idx =
queue->dealloc_ring[pending_index(dc++)];
pending_idx_release[gop - queue->tx_unmap_ops] =
pending_idx;
queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
queue->mmap_pages[pending_idx];
gnttab_set_unmap_op(gop,
idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
queue->grant_tx_handle[pending_idx]);
xenvif_grant_handle_reset(queue, pending_idx);
++gop;
}
} while (dp != queue->dealloc_prod);
queue->dealloc_cons = dc;
if (gop - queue->tx_unmap_ops > 0) {
int ret;
ret = gnttab_unmap_refs(queue->tx_unmap_ops,
NULL,
queue->pages_to_unmap,
gop - queue->tx_unmap_ops);
if (ret) {
netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
gop - queue->tx_unmap_ops, ret);
for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
if (gop[i].status != GNTST_okay)
netdev_err(queue->vif->dev,
" host_addr: 0x%llx handle: 0x%x status: %d\n",
gop[i].host_addr,
gop[i].handle,
gop[i].status);
}
BUG();
}
}
for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
xenvif_idx_release(queue, pending_idx_release[i],
XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops = 0, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(queue)))
return 0;
xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
if (nr_cops == 0)
return 0;
gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
if (nr_mops != 0) {
ret = gnttab_map_refs(queue->tx_map_ops,
NULL,
queue->pages_to_map,
nr_mops);
if (ret) {
unsigned int i;
netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
nr_mops, ret);
for (i = 0; i < nr_mops; ++i)
WARN_ON_ONCE(queue->tx_map_ops[i].status ==
GNTST_okay);
}
}
work_done = xenvif_tx_submit(queue);
return work_done;
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status)
{
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
unsigned long flags;
pending_tx_info = &queue->pending_tx_info[pending_idx];
spin_lock_irqsave(&queue->response_lock, flags);
make_tx_response(queue, &pending_tx_info->req,
pending_tx_info->extra_count, status);
/* Release the pending index before pushing the Tx response so
* it's available before a new Tx request is pushed by the
* frontend.
*/
index = pending_index(queue->pending_prod++);
queue->pending_ring[index] = pending_idx;
push_tx_responses(queue);
spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
struct xen_netif_tx_request *txp,
unsigned int extra_count,
s8 st)
{
RING_IDX i = queue->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
resp = RING_GET_RESPONSE(&queue->tx, i);
resp->id = txp->id;
resp->status = st;
while (extra_count-- != 0)
RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
queue->tx.rsp_prod_pvt = ++i;
}
static void push_tx_responses(struct xenvif_queue *queue)
{
int notify;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
}
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;
gnttab_set_unmap_op(&tx_unmap_op,
idx_to_kaddr(queue, pending_idx),
GNTMAP_host_map,
queue->grant_tx_handle[pending_idx]);
xenvif_grant_handle_reset(queue, pending_idx);
ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
&queue->mmap_pages[pending_idx], 1);
if (ret) {
netdev_err(queue->vif->dev,
"Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
ret,
pending_idx,
tx_unmap_op.host_addr,
tx_unmap_op.handle,
tx_unmap_op.status);
BUG();
}
}
static inline int tx_work_todo(struct xenvif_queue *queue)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
return 1;
return 0;
}
static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
return queue->dealloc_cons != queue->dealloc_prod;
}
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
if (queue->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
queue->tx.sring);
if (queue->rx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
queue->rx.sring);
}
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
RING_IDX rsp_prod, req_prod;
int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&tx_ring_ref, 1, &addr);
if (err)
goto err;
txs = (struct xen_netif_tx_sring *)addr;
rsp_prod = READ_ONCE(txs->rsp_prod);
req_prod = READ_ONCE(txs->req_prod);
BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
err = -EIO;
if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
goto err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&rx_ring_ref, 1, &addr);
if (err)
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
rsp_prod = READ_ONCE(rxs->rsp_prod);
req_prod = READ_ONCE(rxs->req_prod);
BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
err = -EIO;
if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
goto err;
return 0;
err:
xenvif_unmap_frontend_data_rings(queue);
return err;
}
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
/* Dealloc thread must remain running until all inflight
* packets complete.
*/
return kthread_should_stop() &&
!atomic_read(&queue->inflight_packets);
}
int xenvif_dealloc_kthread(void *data)
{
struct xenvif_queue *queue = data;
for (;;) {
wait_event_interruptible(queue->dealloc_wq,
tx_dealloc_work_todo(queue) ||
xenvif_dealloc_kthread_should_stop(queue));
if (xenvif_dealloc_kthread_should_stop(queue))
break;
xenvif_tx_dealloc_action(queue);
cond_resched();
}
/* Unmap anything remaining */
if (tx_dealloc_work_todo(queue))
xenvif_tx_dealloc_action(queue);
return 0;
}
static void make_ctrl_response(struct xenvif *vif,
const struct xen_netif_ctrl_request *req,
u32 status, u32 data)
{
RING_IDX idx = vif->ctrl.rsp_prod_pvt;
struct xen_netif_ctrl_response rsp = {
.id = req->id,
.type = req->type,
.status = status,
.data = data,
};
*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
vif->ctrl.rsp_prod_pvt = ++idx;
}
static void push_ctrl_response(struct xenvif *vif)
{
int notify;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
if (notify)
notify_remote_via_irq(vif->ctrl_irq);
}
static void process_ctrl_request(struct xenvif *vif,
const struct xen_netif_ctrl_request *req)
{
u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
u32 data = 0;
switch (req->type) {
case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
status = xenvif_set_hash_alg(vif, req->data[0]);
break;
case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
status = xenvif_get_hash_flags(vif, &data);
break;
case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
status = xenvif_set_hash_flags(vif, req->data[0]);
break;
case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
status = xenvif_set_hash_key(vif, req->data[0],
req->data[1]);
break;
case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
status = XEN_NETIF_CTRL_STATUS_SUCCESS;
data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
break;
case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
status = xenvif_set_hash_mapping_size(vif,
req->data[0]);
break;
case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
status = xenvif_set_hash_mapping(vif, req->data[0],
req->data[1],
req->data[2]);
break;
default:
break;
}
make_ctrl_response(vif, req, status, data);
push_ctrl_response(vif);
}
static void xenvif_ctrl_action(struct xenvif *vif)
{
for (;;) {
RING_IDX req_prod, req_cons;
req_prod = vif->ctrl.sring->req_prod;
req_cons = vif->ctrl.req_cons;
/* Make sure we can see requests before we process them. */
rmb();
if (req_cons == req_prod)
break;
while (req_cons != req_prod) {
struct xen_netif_ctrl_request req;
RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
req_cons++;
process_ctrl_request(vif, &req);
}
vif->ctrl.req_cons = req_cons;
vif->ctrl.sring->req_event = req_cons + 1;
}
}
static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
return true;
return false;
}
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
while (xenvif_ctrl_work_todo(vif)) {
xenvif_ctrl_action(vif);
eoi_flag = 0;
}
xen_irq_lateeoi(irq, eoi_flag);
return IRQ_HANDLED;
}
static int __init netback_init(void)
{
int rc = 0;
if (!xen_domain())
return -ENODEV;
/* Allow as many queues as there are CPUs, but at most 8, if the user
* has not specified a value.
*/
if (xenvif_max_queues == 0)
xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
num_online_cpus());
if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
}
rc = xenvif_xenbus_init();
if (rc)
goto failed_init;
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
#endif /* CONFIG_DEBUG_FS */
return 0;
failed_init:
return rc;
}
module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
xenvif_xenbus_fini();
}
module_exit(netback_fini);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
|
linux-master
|
drivers/net/xen-netback/netback.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xenbus code for netif backend
*
* Copyright (C) 2005 Rusty Russell <[email protected]>
* Copyright (C) 2005 XenSource Ltd
*/
#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
static int connect_data_rings(struct backend_info *be,
struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
static void unregister_hotplug_status_watch(struct backend_info *be);
static void xen_unregister_watchers(struct xenvif *vif);
static void set_backend_state(struct backend_info *be,
enum xenbus_state state);
#ifdef CONFIG_DEBUG_FS
struct dentry *xen_netback_dbg_root = NULL;
static int xenvif_read_io_ring(struct seq_file *m, void *v)
{
struct xenvif_queue *queue = m->private;
struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
struct netdev_queue *dev_queue;
if (tx_ring->sring) {
struct xen_netif_tx_sring *sring = tx_ring->sring;
seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
tx_ring->nr_ents);
seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
sring->req_prod,
sring->req_prod - sring->rsp_prod,
tx_ring->req_cons,
tx_ring->req_cons - sring->rsp_prod,
sring->req_event,
sring->req_event - sring->rsp_prod);
seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
sring->rsp_prod,
tx_ring->rsp_prod_pvt,
tx_ring->rsp_prod_pvt - sring->rsp_prod,
sring->rsp_event,
sring->rsp_event - sring->rsp_prod);
seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
queue->pending_prod,
queue->pending_cons,
nr_pending_reqs(queue));
seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
queue->dealloc_prod,
queue->dealloc_cons,
queue->dealloc_prod - queue->dealloc_cons);
}
if (rx_ring->sring) {
struct xen_netif_rx_sring *sring = rx_ring->sring;
seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
sring->req_prod,
sring->req_prod - sring->rsp_prod,
rx_ring->req_cons,
rx_ring->req_cons - sring->rsp_prod,
sring->req_event,
sring->req_event - sring->rsp_prod);
seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
sring->rsp_prod,
rx_ring->rsp_prod_pvt,
rx_ring->rsp_prod_pvt - sring->rsp_prod,
sring->rsp_event,
sring->rsp_event - sring->rsp_prod);
}
seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
"Credit timer_pending: %d, credit: %lu, usec: %lu\n"
"remaining: %lu, expires: %lu, now: %lu\n",
queue->napi.state, queue->napi.weight,
skb_queue_len(&queue->tx_queue),
timer_pending(&queue->credit_timeout),
queue->credit_bytes,
queue->credit_usec,
queue->remaining_credit,
queue->credit_timeout.expires,
jiffies);
dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
queue->rx_queue_len, queue->rx_queue_max,
skb_queue_len(&queue->rx_queue),
netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
return 0;
}
#define XENVIF_KICK_STR "kick"
#define BUFFER_SIZE 32
static ssize_t
xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
loff_t *ppos)
{
struct xenvif_queue *queue =
((struct seq_file *)filp->private_data)->private;
int len;
char write[BUFFER_SIZE];
/* don't allow partial writes and check the length */
if (*ppos != 0)
return 0;
if (count >= sizeof(write))
return -ENOSPC;
len = simple_write_to_buffer(write,
sizeof(write) - 1,
ppos,
buf,
count);
if (len < 0)
return len;
write[len] = '\0';
if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
xenvif_interrupt(0, (void *)queue);
else {
pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
queue->id);
count = -EINVAL;
}
return count;
}
static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
{
int ret;
void *queue = NULL;
if (inode->i_private)
queue = inode->i_private;
ret = single_open(filp, xenvif_read_io_ring, queue);
filp->f_mode |= FMODE_PWRITE;
return ret;
}
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
.open = xenvif_io_ring_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
};
static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
struct xenvif *vif = m->private;
xenvif_dump_hash_info(vif, m);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
static void xenvif_debugfs_addif(struct xenvif *vif)
{
int i;
vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
xen_netback_dbg_root);
for (i = 0; i < vif->num_queues; ++i) {
char filename[sizeof("io_ring_q") + 4];
snprintf(filename, sizeof(filename), "io_ring_q%d", i);
debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
&vif->queues[i],
&xenvif_dbg_io_ring_ops_fops);
}
if (vif->ctrl_irq)
debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
&xenvif_ctrl_fops);
}
static void xenvif_debugfs_delif(struct xenvif *vif)
{
debugfs_remove_recursive(vif->xenvif_dbg_root);
vif->xenvif_dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */
/*
* Handle the creation of the hotplug script environment. We add the script
* and vif variables to the environment, for the benefit of the vif-* hotplug
* scripts.
*/
static int netback_uevent(const struct xenbus_device *xdev,
struct kobj_uevent_env *env)
{
struct backend_info *be = dev_get_drvdata(&xdev->dev);
if (!be)
return 0;
if (add_uevent_var(env, "script=%s", be->hotplug_script))
return -ENOMEM;
if (!be->vif)
return 0;
return add_uevent_var(env, "vif=%s", be->vif->dev->name);
}
static int backend_create_xenvif(struct backend_info *be)
{
int err;
long handle;
struct xenbus_device *dev = be->dev;
struct xenvif *vif;
if (be->vif != NULL)
return 0;
err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
if (err != 1) {
xenbus_dev_fatal(dev, err, "reading handle");
return (err < 0) ? err : -EINVAL;
}
vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
if (IS_ERR(vif)) {
err = PTR_ERR(vif);
xenbus_dev_fatal(dev, err, "creating interface");
return err;
}
be->vif = vif;
vif->be = be;
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
return 0;
}
static void backend_disconnect(struct backend_info *be)
{
struct xenvif *vif = be->vif;
if (vif) {
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
/* At this point some of the handlers may still be active
* so we need to have additional synchronization here.
*/
vif->num_queues = 0;
synchronize_net();
for (queue_index = 0; queue_index < num_queues; ++queue_index)
xenvif_deinit_queue(&vif->queues[queue_index]);
vfree(vif->queues);
vif->queues = NULL;
xenvif_disconnect_ctrl(vif);
}
}
static void backend_connect(struct backend_info *be)
{
if (be->vif)
connect(be);
}
static inline void backend_switch_state(struct backend_info *be,
enum xenbus_state state)
{
struct xenbus_device *dev = be->dev;
pr_debug("%s -> %s\n", dev->nodename, xenbus_strstate(state));
be->state = state;
/* If we are waiting for a hotplug script then defer the
* actual xenbus state change.
*/
if (!be->have_hotplug_status_watch)
xenbus_switch_state(dev, state);
}
/* Handle backend state transitions:
*
* The backend state starts in Initialising and the following transitions are
* allowed.
*
* Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
*/
static void set_backend_state(struct backend_info *be,
enum xenbus_state state)
{
while (be->state != state) {
switch (be->state) {
case XenbusStateInitialising:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
case XenbusStateClosing:
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosed);
break;
default:
BUG();
}
break;
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosing:
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateInitWait:
switch (state) {
case XenbusStateConnected:
backend_connect(be);
backend_switch_state(be, XenbusStateConnected);
break;
case XenbusStateClosing:
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateConnected:
switch (state) {
case XenbusStateInitWait:
case XenbusStateClosing:
case XenbusStateClosed:
backend_disconnect(be);
backend_switch_state(be, XenbusStateClosing);
break;
default:
BUG();
}
break;
case XenbusStateClosing:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosed);
break;
default:
BUG();
}
break;
default:
BUG();
}
}
}
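/* Worked example of the shortest-path walk above: with be->state ==
 * XenbusStateConnected and a target state of XenbusStateClosed, the loop
 * first takes Connected -> Closing (calling backend_disconnect()), then
 * Closing -> Closed, and only then terminates.
 */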
static void read_xenbus_frontend_xdp(struct backend_info *be,
struct xenbus_device *dev)
{
struct xenvif *vif = be->vif;
u16 headroom;
int err;
err = xenbus_scanf(XBT_NIL, dev->otherend,
"xdp-headroom", "%hu", &headroom);
if (err != 1) {
vif->xdp_headroom = 0;
return;
}
if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
headroom = XEN_NETIF_MAX_XDP_HEADROOM;
vif->xdp_headroom = headroom;
}
/*
* Callback received when the frontend's state changes.
*/
static void frontend_changed(struct xenbus_device *dev,
enum xenbus_state frontend_state)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
pr_debug("%s -> %s\n", dev->otherend, xenbus_strstate(frontend_state));
be->frontend_state = frontend_state;
switch (frontend_state) {
case XenbusStateInitialising:
set_backend_state(be, XenbusStateInitWait);
break;
case XenbusStateInitialised:
break;
case XenbusStateConnected:
set_backend_state(be, XenbusStateConnected);
break;
case XenbusStateReconfiguring:
read_xenbus_frontend_xdp(be, dev);
xenbus_switch_state(dev, XenbusStateReconfigured);
break;
case XenbusStateClosing:
set_backend_state(be, XenbusStateClosing);
break;
case XenbusStateClosed:
set_backend_state(be, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
fallthrough; /* if not online */
case XenbusStateUnknown:
set_backend_state(be, XenbusStateClosed);
device_unregister(&dev->dev);
break;
default:
xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
frontend_state);
break;
}
}
static void xen_net_read_rate(struct xenbus_device *dev,
unsigned long *bytes, unsigned long *usec)
{
char *s, *e;
unsigned long b, u;
char *ratestr;
/* Default to unlimited bandwidth. */
*bytes = ~0UL;
*usec = 0;
ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
if (IS_ERR(ratestr))
return;
s = ratestr;
b = simple_strtoul(s, &e, 10);
if ((s == e) || (*e != ','))
goto fail;
s = e + 1;
u = simple_strtoul(s, &e, 10);
if ((s == e) || (*e != '\0'))
goto fail;
*bytes = b;
*usec = u;
kfree(ratestr);
return;
fail:
pr_warn("Failed to parse network rate limit. Traffic unlimited.\n");
kfree(ratestr);
}
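/* Example (illustrative values): a xenstore "rate" node containing
 * "1000000,20000" yields *bytes = 1000000 and *usec = 20000, i.e. one
 * million bytes of credit replenished every 20 ms; a missing or malformed
 * node leaves the defaults set above (unlimited bandwidth) in place.
 */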
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
char *s, *e, *macstr;
int i;
macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
if (IS_ERR(macstr))
return PTR_ERR(macstr);
for (i = 0; i < ETH_ALEN; i++) {
mac[i] = simple_strtoul(s, &e, 16);
if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
kfree(macstr);
return -ENOENT;
}
s = e+1;
}
kfree(macstr);
return 0;
}
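/* Example (illustrative value): a xenstore "mac" node of
 * "00:16:3e:12:34:56" fills mac[] with { 0x00, 0x16, 0x3e, 0x12, 0x34,
 * 0x56 }; anything that is not six ':'-separated hex octets makes the
 * parser above return -ENOENT.
 */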
static void xen_net_rate_changed(struct xenbus_watch *watch,
const char *path, const char *token)
{
struct xenvif *vif = container_of(watch, struct xenvif, credit_watch);
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
unsigned long credit_bytes;
unsigned long credit_usec;
unsigned int queue_index;
xen_net_read_rate(dev, &credit_bytes, &credit_usec);
for (queue_index = 0; queue_index < vif->num_queues; queue_index++) {
struct xenvif_queue *queue = &vif->queues[queue_index];
queue->credit_bytes = credit_bytes;
queue->credit_usec = credit_usec;
if (!mod_timer_pending(&queue->credit_timeout, jiffies) &&
queue->remaining_credit > queue->credit_bytes) {
queue->remaining_credit = queue->credit_bytes;
}
}
}
static int xen_register_credit_watch(struct xenbus_device *dev,
struct xenvif *vif)
{
int err = 0;
char *node;
unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
if (vif->credit_watch.node)
return -EADDRINUSE;
node = kmalloc(maxlen, GFP_KERNEL);
if (!node)
return -ENOMEM;
snprintf(node, maxlen, "%s/rate", dev->nodename);
vif->credit_watch.node = node;
vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = xen_net_rate_changed;
err = register_xenbus_watch(&vif->credit_watch);
if (err) {
pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
kfree(node);
vif->credit_watch.node = NULL;
vif->credit_watch.will_handle = NULL;
vif->credit_watch.callback = NULL;
}
return err;
}
static void xen_unregister_credit_watch(struct xenvif *vif)
{
if (vif->credit_watch.node) {
unregister_xenbus_watch(&vif->credit_watch);
kfree(vif->credit_watch.node);
vif->credit_watch.node = NULL;
}
}
static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
const char *path, const char *token)
{
struct xenvif *vif = container_of(watch, struct xenvif,
mcast_ctrl_watch);
struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
vif->multicast_control = !!xenbus_read_unsigned(dev->otherend,
"request-multicast-control", 0);
}
static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
struct xenvif *vif)
{
int err = 0;
char *node;
unsigned maxlen = strlen(dev->otherend) +
sizeof("/request-multicast-control");
if (vif->mcast_ctrl_watch.node) {
pr_err_ratelimited("Watch is already registered\n");
return -EADDRINUSE;
}
node = kmalloc(maxlen, GFP_KERNEL);
if (!node) {
pr_err("Failed to allocate memory for watch\n");
return -ENOMEM;
}
snprintf(node, maxlen, "%s/request-multicast-control",
dev->otherend);
vif->mcast_ctrl_watch.node = node;
vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
err = register_xenbus_watch(&vif->mcast_ctrl_watch);
if (err) {
pr_err("Failed to set watcher %s\n",
vif->mcast_ctrl_watch.node);
kfree(node);
vif->mcast_ctrl_watch.node = NULL;
vif->mcast_ctrl_watch.will_handle = NULL;
vif->mcast_ctrl_watch.callback = NULL;
}
return err;
}
static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
{
if (vif->mcast_ctrl_watch.node) {
unregister_xenbus_watch(&vif->mcast_ctrl_watch);
kfree(vif->mcast_ctrl_watch.node);
vif->mcast_ctrl_watch.node = NULL;
}
}
static void xen_register_watchers(struct xenbus_device *dev,
struct xenvif *vif)
{
xen_register_credit_watch(dev, vif);
xen_register_mcast_ctrl_watch(dev, vif);
}
static void xen_unregister_watchers(struct xenvif *vif)
{
xen_unregister_mcast_ctrl_watch(vif);
xen_unregister_credit_watch(vif);
}
static void unregister_hotplug_status_watch(struct backend_info *be)
{
if (be->have_hotplug_status_watch) {
unregister_xenbus_watch(&be->hotplug_status_watch);
kfree(be->hotplug_status_watch.node);
}
be->have_hotplug_status_watch = 0;
}
static void hotplug_status_changed(struct xenbus_watch *watch,
const char *path,
const char *token)
{
struct backend_info *be = container_of(watch,
struct backend_info,
hotplug_status_watch);
char *str;
unsigned int len;
str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len);
if (IS_ERR(str))
return;
if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) {
/* Complete any pending state change */
xenbus_switch_state(be->dev, be->state);
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
}
kfree(str);
}
static int connect_ctrl_ring(struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xenvif *vif = be->vif;
unsigned int val;
grant_ref_t ring_ref;
unsigned int evtchn;
int err;
err = xenbus_scanf(XBT_NIL, dev->otherend,
"ctrl-ring-ref", "%u", &val);
if (err < 0)
goto done; /* The frontend does not have a control ring */
ring_ref = val;
err = xenbus_scanf(XBT_NIL, dev->otherend,
"event-channel-ctrl", "%u", &val);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel-ctrl",
dev->otherend);
goto fail;
}
evtchn = val;
err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frame %u port %u",
ring_ref, evtchn);
goto fail;
}
done:
return 0;
fail:
return err;
}
static void connect(struct backend_info *be)
{
int err;
struct xenbus_device *dev = be->dev;
unsigned long credit_bytes, credit_usec;
unsigned int queue_index;
unsigned int requested_num_queues;
struct xenvif_queue *queue;
/* Check whether the frontend requested multiple queues
* and read the number requested.
*/
requested_num_queues = xenbus_read_unsigned(dev->otherend,
"multi-queue-num-queues", 1);
if (requested_num_queues > xenvif_max_queues) {
/* buggy or malicious guest */
xenbus_dev_fatal(dev, -EINVAL,
"guest requested %u queues, exceeding the maximum of %u.",
requested_num_queues, xenvif_max_queues);
return;
}
err = xen_net_read_mac(dev, be->vif->fe_dev_addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
return;
}
xen_net_read_rate(dev, &credit_bytes, &credit_usec);
xen_unregister_watchers(be->vif);
xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
err = connect_ctrl_ring(be);
if (err) {
xenbus_dev_fatal(dev, err, "connecting control ring");
return;
}
/* Use the number of queues requested by the frontend */
be->vif->queues = vzalloc(array_size(requested_num_queues,
sizeof(struct xenvif_queue)));
if (!be->vif->queues) {
xenbus_dev_fatal(dev, -ENOMEM,
"allocating queues");
return;
}
be->vif->num_queues = requested_num_queues;
be->vif->stalled_queues = requested_num_queues;
for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
queue = &be->vif->queues[queue_index];
queue->vif = be->vif;
queue->id = queue_index;
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
be->vif->dev->name, queue->id);
err = xenvif_init_queue(queue);
if (err) {
/* xenvif_init_queue() cleans up after itself on
* failure, but we need to clean up any previously
* initialised queues. Set num_queues to i so that
* earlier queues can be destroyed using the regular
* disconnect logic.
*/
be->vif->num_queues = queue_index;
goto err;
}
queue->credit_bytes = credit_bytes;
queue->remaining_credit = credit_bytes;
queue->credit_usec = credit_usec;
err = connect_data_rings(be, queue);
if (err) {
/* connect_data_rings() cleans up after itself on
* failure, but we need to clean up after
* xenvif_init_queue() here, and also clean up any
* previously initialised queues.
*/
xenvif_deinit_queue(queue);
be->vif->num_queues = queue_index;
goto err;
}
}
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_addif(be->vif);
#endif /* CONFIG_DEBUG_FS */
/* Initialisation completed, tell core driver the number of
* active queues.
*/
rtnl_lock();
netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
rtnl_unlock();
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
hotplug_status_changed,
"%s/%s", dev->nodename, "hotplug-status");
if (!err)
be->have_hotplug_status_watch = 1;
netif_tx_wake_all_queues(be->vif->dev);
return;
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
xenvif_disconnect_ctrl(be->vif);
return;
}
static int connect_data_rings(struct backend_info *be,
struct xenvif_queue *queue)
{
struct xenbus_device *dev = be->dev;
unsigned int num_queues = queue->vif->num_queues;
unsigned long tx_ring_ref, rx_ring_ref;
unsigned int tx_evtchn, rx_evtchn;
int err;
char *xspath;
size_t xspathsize;
const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
/* If the frontend requested 1 queue, or we have fallen back
* to single queue due to lack of frontend support for multi-
* queue, expect the remaining XenStore keys in the toplevel
* directory. Otherwise, expect them in a subdirectory called
* queue-N.
*/
if (num_queues == 1) {
xspath = kstrdup(dev->otherend, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
} else {
xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
xspath = kzalloc(xspathsize, GFP_KERNEL);
if (!xspath) {
xenbus_dev_fatal(dev, -ENOMEM,
"reading ring references");
return -ENOMEM;
}
snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend,
queue->id);
}
err = xenbus_gather(XBT_NIL, xspath,
"tx-ring-ref", "%lu", &tx_ring_ref,
"rx-ring-ref", "%lu", &rx_ring_ref, NULL);
if (err) {
xenbus_dev_fatal(dev, err,
"reading %s/ring-ref",
xspath);
goto err;
}
/* Try split event channels first, then single event channel. */
err = xenbus_gather(XBT_NIL, xspath,
"event-channel-tx", "%u", &tx_evtchn,
"event-channel-rx", "%u", &rx_evtchn, NULL);
if (err < 0) {
err = xenbus_scanf(XBT_NIL, xspath,
"event-channel", "%u", &tx_evtchn);
if (err < 0) {
xenbus_dev_fatal(dev, err,
"reading %s/event-channel(-tx/rx)",
xspath);
goto err;
}
rx_evtchn = tx_evtchn;
}
/* Map the shared frame, irq etc. */
err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port tx %u rx %u",
tx_ring_ref, rx_ring_ref,
tx_evtchn, rx_evtchn);
goto err;
}
err = 0;
err: /* Regular return falls through with err == 0 */
kfree(xspath);
return err;
}
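/* Example xenstore layout consumed above (paths are illustrative): for a
 * two-queue frontend, keys such as tx-ring-ref, rx-ring-ref and
 * event-channel-tx/-rx live under <otherend>/queue-0/ and
 * <otherend>/queue-1/; with a single queue the same keys sit directly in
 * the <otherend> directory, as the comment at the top of this function
 * explains.
 */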
static int read_xenbus_vif_flags(struct backend_info *be)
{
struct xenvif *vif = be->vif;
struct xenbus_device *dev = be->dev;
unsigned int rx_copy;
int err;
err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
&rx_copy);
if (err == -ENOENT) {
err = 0;
rx_copy = 0;
}
if (err < 0) {
xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
dev->otherend);
return err;
}
if (!rx_copy)
return -EOPNOTSUPP;
if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) {
/* - Reduce drain timeout to poll more frequently for
* Rx requests.
* - Disable Rx stall detection.
*/
be->vif->drain_timeout = msecs_to_jiffies(30);
be->vif->stall_timeout = 0;
}
vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0);
vif->gso_mask = 0;
if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0))
vif->gso_mask |= GSO_BIT(TCPV4);
if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0))
vif->gso_mask |= GSO_BIT(TCPV6);
vif->ip_csum = !xenbus_read_unsigned(dev->otherend,
"feature-no-csum-offload", 0);
vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
"feature-ipv6-csum-offload", 0);
read_xenbus_frontend_xdp(be, dev);
return 0;
}
static void netback_remove(struct xenbus_device *dev)
{
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
xenvif_free(be->vif);
be->vif = NULL;
}
kfree(be->hotplug_script);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
}
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and switch to InitWait.
*/
static int netback_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
const char *message;
struct xenbus_transaction xbt;
int err;
int sg;
const char *script;
struct backend_info *be = kzalloc(sizeof(*be), GFP_KERNEL);
if (!be) {
xenbus_dev_fatal(dev, -ENOMEM,
"allocating backend structure");
return -ENOMEM;
}
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
sg = 1;
do {
err = xenbus_transaction_start(&xbt);
if (err) {
xenbus_dev_fatal(dev, err, "starting transaction");
goto fail;
}
err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
if (err) {
message = "writing feature-sg";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
"%d", sg);
if (err) {
message = "writing feature-gso-tcpv4";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
"%d", sg);
if (err) {
message = "writing feature-gso-tcpv6";
goto abort_transaction;
}
/* We support partial checksum setup for IPv6 packets */
err = xenbus_printf(xbt, dev->nodename,
"feature-ipv6-csum-offload",
"%d", 1);
if (err) {
message = "writing feature-ipv6-csum-offload";
goto abort_transaction;
}
/* We support rx-copy path. */
err = xenbus_printf(xbt, dev->nodename,
"feature-rx-copy", "%d", 1);
if (err) {
message = "writing feature-rx-copy";
goto abort_transaction;
}
/* we can adjust a headroom for netfront XDP processing */
err = xenbus_printf(xbt, dev->nodename,
"feature-xdp-headroom", "%d",
provides_xdp_headroom);
if (err) {
message = "writing feature-xdp-headroom";
goto abort_transaction;
}
/* We don't support rx-flip path (except old guests who
* don't grok this feature flag).
*/
err = xenbus_printf(xbt, dev->nodename,
"feature-rx-flip", "%d", 0);
if (err) {
message = "writing feature-rx-flip";
goto abort_transaction;
}
/* We support dynamic multicast-control. */
err = xenbus_printf(xbt, dev->nodename,
"feature-multicast-control", "%d", 1);
if (err) {
message = "writing feature-multicast-control";
goto abort_transaction;
}
err = xenbus_printf(xbt, dev->nodename,
"feature-dynamic-multicast-control",
"%d", 1);
if (err) {
message = "writing feature-dynamic-multicast-control";
goto abort_transaction;
}
err = xenbus_transaction_end(xbt, 0);
} while (err == -EAGAIN);
if (err) {
xenbus_dev_fatal(dev, err, "completing transaction");
goto fail;
}
/* Split event channels support, this is optional so it is not
* put inside the above loop.
*/
err = xenbus_printf(XBT_NIL, dev->nodename,
"feature-split-event-channels",
"%u", separate_tx_rx_irq);
if (err)
pr_debug("Error writing feature-split-event-channels\n");
/* Multi-queue support: This is an optional feature. */
err = xenbus_printf(XBT_NIL, dev->nodename,
"multi-queue-max-queues", "%u", xenvif_max_queues);
if (err)
pr_debug("Error writing multi-queue-max-queues\n");
err = xenbus_printf(XBT_NIL, dev->nodename,
"feature-ctrl-ring",
"%u", true);
if (err)
pr_debug("Error writing feature-ctrl-ring\n");
backend_switch_state(be, XenbusStateInitWait);
script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
if (IS_ERR(script)) {
err = PTR_ERR(script);
xenbus_dev_fatal(dev, err, "reading script");
goto fail;
}
be->hotplug_script = script;
/* This kicks hotplug scripts, so do it immediately. */
err = backend_create_xenvif(be);
if (err)
goto fail;
return 0;
abort_transaction:
xenbus_transaction_end(xbt, 1);
xenbus_dev_fatal(dev, err, "%s", message);
fail:
pr_debug("failed\n");
netback_remove(dev);
return err;
}
static const struct xenbus_device_id netback_ids[] = {
{ "vif" },
{ "" }
};
static struct xenbus_driver netback_driver = {
.ids = netback_ids,
.probe = netback_probe,
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
.allow_rebind = true,
};
int xenvif_xenbus_init(void)
{
return xenbus_register_backend(&netback_driver);
}
void xenvif_xenbus_fini(void)
{
return xenbus_unregister_driver(&netback_driver);
}
|
linux-master
|
drivers/net/xen-netback/xenbus.c
|
/*
* Copyright (c) 2016 Citrix Systems Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define XEN_NETIF_DEFINE_TOEPLITZ
#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
unsigned int len, u32 val)
{
struct xenvif_hash_cache_entry *new, *entry, *oldest;
unsigned long flags;
bool found;
new = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!new)
return;
memcpy(new->tag, tag, len);
new->len = len;
new->val = val;
spin_lock_irqsave(&vif->hash.cache.lock, flags);
found = false;
oldest = NULL;
list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
lockdep_is_held(&vif->hash.cache.lock)) {
/* Make sure we don't add duplicate entries */
if (entry->len == len &&
memcmp(entry->tag, tag, len) == 0)
found = true;
if (!oldest || entry->seq < oldest->seq)
oldest = entry;
}
if (!found) {
new->seq = atomic_inc_return(&vif->hash.cache.seq);
list_add_rcu(&new->link, &vif->hash.cache.list);
if (++vif->hash.cache.count > xenvif_hash_cache_size) {
list_del_rcu(&oldest->link);
vif->hash.cache.count--;
kfree_rcu(oldest, rcu);
}
}
spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
if (found)
kfree(new);
}
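/* Note on the eviction policy above: entries are stamped with a
 * monotonically increasing sequence number, so once the cache grows past
 * xenvif_hash_cache_size the entry with the smallest seq (the least
 * recently inserted or looked up) is the one removed.
 */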
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
unsigned int len)
{
u32 val;
val = xen_netif_toeplitz_hash(vif->hash.key,
sizeof(vif->hash.key),
data, len);
if (xenvif_hash_cache_size != 0)
xenvif_add_hash(vif, data, len, val);
return val;
}
static void xenvif_flush_hash(struct xenvif *vif)
{
struct xenvif_hash_cache_entry *entry;
unsigned long flags;
if (xenvif_hash_cache_size == 0)
return;
spin_lock_irqsave(&vif->hash.cache.lock, flags);
list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
lockdep_is_held(&vif->hash.cache.lock)) {
list_del_rcu(&entry->link);
vif->hash.cache.count--;
kfree_rcu(entry, rcu);
}
spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
unsigned int len)
{
struct xenvif_hash_cache_entry *entry;
u32 val;
bool found;
if (len >= XEN_NETBK_HASH_TAG_SIZE)
return 0;
if (xenvif_hash_cache_size == 0)
return xenvif_new_hash(vif, data, len);
rcu_read_lock();
found = false;
list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
if (entry->len == len &&
memcmp(entry->tag, data, len) == 0) {
val = entry->val;
entry->seq = atomic_inc_return(&vif->hash.cache.seq);
found = true;
break;
}
}
rcu_read_unlock();
if (!found)
val = xenvif_new_hash(vif, data, len);
return val;
}
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
struct flow_keys flow;
u32 hash = 0;
enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
u32 flags = vif->hash.flags;
bool has_tcp_hdr;
/* Quick rejection test: If the network protocol doesn't
* correspond to any enabled hash type then there's no point
* in parsing the packet header.
*/
switch (skb->protocol) {
case htons(ETH_P_IP):
if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
XEN_NETIF_CTRL_HASH_TYPE_IPV4))
break;
goto done;
case htons(ETH_P_IPV6):
if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
XEN_NETIF_CTRL_HASH_TYPE_IPV6))
break;
goto done;
default:
goto done;
}
memset(&flow, 0, sizeof(flow));
if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
goto done;
has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
!(flow.control.flags & FLOW_DIS_IS_FRAGMENT);
switch (skb->protocol) {
case htons(ETH_P_IP):
if (has_tcp_hdr &&
(flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
u8 data[12];
memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
memcpy(&data[8], &flow.ports.src, 2);
memcpy(&data[10], &flow.ports.dst, 2);
hash = xenvif_find_hash(vif, data, sizeof(data));
type = PKT_HASH_TYPE_L4;
} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
u8 data[8];
memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
hash = xenvif_find_hash(vif, data, sizeof(data));
type = PKT_HASH_TYPE_L3;
}
break;
case htons(ETH_P_IPV6):
if (has_tcp_hdr &&
(flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
u8 data[36];
memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
memcpy(&data[32], &flow.ports.src, 2);
memcpy(&data[34], &flow.ports.dst, 2);
hash = xenvif_find_hash(vif, data, sizeof(data));
type = PKT_HASH_TYPE_L4;
} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
u8 data[32];
memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
hash = xenvif_find_hash(vif, data, sizeof(data));
type = PKT_HASH_TYPE_L3;
}
break;
}
done:
if (type == PKT_HASH_TYPE_NONE)
skb_clear_hash(skb);
else
__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
switch (alg) {
case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
break;
default:
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
vif->hash.alg = alg;
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.flags = flags;
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
u8 *key = vif->hash.key;
struct gnttab_copy copy_op = {
.source.u.ref = gref,
.source.domid = vif->domid,
.dest.u.gmfn = virt_to_gfn(key),
.dest.domid = DOMID_SELF,
.dest.offset = xen_offset_in_page(key),
.len = len,
.flags = GNTCOPY_source_gref
};
if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
if (copy_op.len != 0) {
gnttab_batch_copy(&copy_op, 1);
if (copy_op.status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
/* Clear any remaining key octets */
if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);
xenvif_flush_hash(vif);
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.size = size;
memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
sizeof(u32) * size);
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
unsigned int nr = 1;
struct gnttab_copy copy_op[2] = {{
.source.u.ref = gref,
.source.domid = vif->domid,
.dest.domid = DOMID_SELF,
.len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
}};
if ((off + len < off) || (off + len > vif->hash.size) ||
len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
copy_op[1] = copy_op[0];
copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
copy_op[1].dest.offset = 0;
copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
copy_op[0].len = copy_op[1].source.offset;
nr = 2;
}
memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
vif->hash.size * sizeof(*mapping));
if (copy_op[0].len != 0) {
gnttab_batch_copy(copy_op, nr);
if (copy_op[0].status != GNTST_okay ||
copy_op[nr - 1].status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
while (len-- != 0)
if (mapping[off++] >= vif->num_queues)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.mapping_sel = !vif->hash.mapping_sel;
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
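/* Illustrative sizing for the copy above: with XEN_PAGE_SIZE == 4096 and
 * 32-bit mapping entries, a single request can carry at most 1024
 * entries; if the destination range straddles a page boundary, the copy
 * is split into the two grant-copy operations built above.
 */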
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
unsigned int i;
switch (vif->hash.alg) {
case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
break;
case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
seq_puts(m, "Hash Algorithm: NONE\n");
fallthrough;
default:
return;
}
if (vif->hash.flags) {
seq_puts(m, "\nHash Flags:\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
seq_puts(m, "- IPv4\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
seq_puts(m, "- IPv4 + TCP\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
seq_puts(m, "- IPv6\n");
if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
seq_puts(m, "- IPv6 + TCP\n");
}
seq_puts(m, "\nHash Key:\n");
for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
unsigned int j, n;
n = 8;
if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
seq_printf(m, "%02x ", vif->hash.key[i]);
seq_puts(m, "\n");
}
if (vif->hash.size != 0) {
const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
seq_puts(m, "\nHash Mapping:\n");
for (i = 0; i < vif->hash.size; ) {
unsigned int j, n;
n = 8;
if (i + n >= vif->hash.size)
n = vif->hash.size - i;
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
seq_printf(m, "%4u ", mapping[i]);
seq_puts(m, "\n");
}
}
}
#endif /* CONFIG_DEBUG_FS */
void xenvif_init_hash(struct xenvif *vif)
{
if (xenvif_hash_cache_size == 0)
return;
BUG_ON(vif->hash.cache.count);
spin_lock_init(&vif->hash.cache.lock);
INIT_LIST_HEAD(&vif->hash.cache.list);
}
void xenvif_deinit_hash(struct xenvif *vif)
{
xenvif_flush_hash(vif);
}
|
linux-master
|
drivers/net/xen-netback/hash.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/common/amba.c
*
* Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/amba/bus.h>
#include <linux/sizes.h>
#include <linux/limits.h>
#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
/* called on periphid match and class 0x9 coresight device. */
static int
amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
struct amba_cs_uci_id *uci;
uci = table->data;
/* no table data or zero mask - return match on periphid */
if (!uci || (uci->devarch_mask == 0))
return 1;
/* test against read devtype and masked devarch value */
ret = (dev->uci.devtype == uci->devtype) &&
((dev->uci.devarch & uci->devarch_mask) == uci->devarch);
return ret;
}
static const struct amba_id *
amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
while (table->mask) {
if (((dev->periphid & table->mask) == table->id) &&
((dev->cid != CORESIGHT_CID) ||
(amba_cs_uci_id_match(table, dev))))
return table;
table++;
}
return NULL;
}
static int amba_get_enable_pclk(struct amba_device *pcdev)
{
int ret;
pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
if (IS_ERR(pcdev->pclk))
return PTR_ERR(pcdev->pclk);
ret = clk_prepare_enable(pcdev->pclk);
if (ret)
clk_put(pcdev->pclk);
return ret;
}
static void amba_put_disable_pclk(struct amba_device *pcdev)
{
clk_disable_unprepare(pcdev->pclk);
clk_put(pcdev->pclk);
}
static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
ssize_t len;
device_lock(_dev);
len = sprintf(buf, "%s\n", dev->driver_override);
device_unlock(_dev);
return len;
}
static ssize_t driver_override_store(struct device *_dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
int ret;
ret = driver_set_override(_dev, &dev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(driver_override);
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
{ \
struct amba_device *dev = to_amba_device(_dev); \
return sprintf(buf, fmt, arg); \
} \
static DEVICE_ATTR_RO(name)
amba_attr_func(id, "%08x\n", dev->periphid);
amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n",
(unsigned long long)dev->res.start, (unsigned long long)dev->res.end,
dev->res.flags);
static struct attribute *amba_dev_attrs[] = {
&dev_attr_id.attr,
&dev_attr_resource.attr,
&dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(amba_dev);
static int amba_read_periphid(struct amba_device *dev)
{
struct reset_control *rstc;
u32 size, pid, cid;
void __iomem *tmp;
int i, ret;
ret = dev_pm_domain_attach(&dev->dev, true);
if (ret) {
dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
goto err_out;
}
ret = amba_get_enable_pclk(dev);
if (ret) {
dev_dbg(&dev->dev, "can't get pclk: %d\n", ret);
goto err_pm;
}
/*
* Find reset control(s) of the amba bus and de-assert them.
*/
rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret != -EPROBE_DEFER)
dev_err(&dev->dev, "can't get reset: %d\n", ret);
goto err_clk;
}
reset_control_deassert(rstc);
reset_control_put(rstc);
size = resource_size(&dev->res);
tmp = ioremap(dev->res.start, size);
if (!tmp) {
ret = -ENOMEM;
goto err_clk;
}
/*
* Read pid and cid based on size of resource
* they are located at end of region
*/
for (pid = 0, i = 0; i < 4; i++)
pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
for (cid = 0, i = 0; i < 4; i++)
cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);
if (cid == CORESIGHT_CID) {
/* set the base to the start of the last 4k block */
void __iomem *csbase = tmp + size - 4096;
dev->uci.devarch = readl(csbase + UCI_REG_DEVARCH_OFFSET);
dev->uci.devtype = readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
}
if (cid == AMBA_CID || cid == CORESIGHT_CID) {
dev->periphid = pid;
dev->cid = cid;
}
if (!dev->periphid)
ret = -ENODEV;
iounmap(tmp);
err_clk:
amba_put_disable_pclk(dev);
err_pm:
dev_pm_domain_detach(&dev->dev, true);
err_out:
return ret;
}
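/* Layout assumed by the reads above: the four peripheral ID bytes sit at
 * offset size - 0x20 and the four component ID bytes at size - 0x10, one
 * byte per 32-bit register; for a typical 4KiB PrimeCell region that is
 * 0xFE0-0xFEC and 0xFF0-0xFFC respectively.
 */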
static int amba_match(struct device *dev, struct device_driver *drv)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
int ret = amba_read_periphid(pcdev);
/*
* Returning any error other than -EPROBE_DEFER from bus match
* can cause driver registration failure. So, if there's a
* permanent failure in reading pid and cid, simply map it to
* -EPROBE_DEFER.
*/
if (ret) {
mutex_unlock(&pcdev->periphid_lock);
return -EPROBE_DEFER;
}
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
}
mutex_unlock(&pcdev->periphid_lock);
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
return !strcmp(pcdev->driver_override, drv->name);
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
static int amba_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct amba_device *pcdev = to_amba_device(dev);
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
if (retval)
return retval;
retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
static int of_amba_device_decode_irq(struct amba_device *dev)
{
struct device_node *node = dev->dev.of_node;
int i, irq = 0;
if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
/* Decode the IRQs and address ranges */
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = of_irq_get(node, i);
if (irq < 0) {
if (irq == -EPROBE_DEFER)
return irq;
irq = 0;
}
dev->irq[i] = irq;
}
}
return 0;
}
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
*/
static int amba_probe(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(dev->driver);
const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
int ret;
do {
ret = of_amba_device_decode_irq(pcdev);
if (ret)
break;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret < 0)
break;
ret = dev_pm_domain_attach(dev, true);
if (ret)
break;
ret = amba_get_enable_pclk(pcdev);
if (ret) {
dev_pm_domain_detach(dev, true);
break;
}
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
ret = pcdrv->probe(pcdev, id);
if (ret == 0)
break;
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
dev_pm_domain_detach(dev, true);
} while (0);
return ret;
}
static void amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
pm_runtime_get_sync(dev);
if (drv->remove)
drv->remove(pcdev);
pm_runtime_put_noidle(dev);
/* Undo the runtime PM settings in amba_probe() */
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
dev_pm_domain_detach(dev, true);
}
static void amba_shutdown(struct device *dev)
{
struct amba_driver *drv;
if (!dev->driver)
return;
drv = to_amba_driver(dev->driver);
if (drv->shutdown)
drv->shutdown(to_amba_device(dev));
}
static int amba_dma_configure(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
enum dev_dma_attr attr;
int ret = 0;
if (dev->of_node) {
ret = of_dma_configure(dev, dev->of_node, true);
} else if (has_acpi_companion(dev)) {
attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
ret = acpi_dma_configure(dev, attr);
}
if (!ret && !drv->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
}
return ret;
}
static void amba_dma_cleanup(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
if (!drv->driver_managed_dma)
iommu_device_unuse_default_domain(dev);
}
#ifdef CONFIG_PM
/*
* Hooks to provide runtime PM of the pclk (bus clock). It is safe to
* enable/disable the bus clock at runtime PM suspend/resume as this
* does not result in loss of context.
*/
static int amba_pm_runtime_suspend(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
int ret = pm_generic_runtime_suspend(dev);
if (ret == 0 && dev->driver) {
if (pm_runtime_is_irq_safe(dev))
clk_disable(pcdev->pclk);
else
clk_disable_unprepare(pcdev->pclk);
}
return ret;
}
static int amba_pm_runtime_resume(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
int ret;
if (dev->driver) {
if (pm_runtime_is_irq_safe(dev))
ret = clk_enable(pcdev->pclk);
else
ret = clk_prepare_enable(pcdev->pclk);
/* Failure is probably fatal to the system, but... */
if (ret)
return ret;
}
return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops amba_pm = {
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
NULL
)
};
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
* DMA configuration for platform and AMBA bus is same. So here we reuse
* platform's DMA config routine.
*/
struct bus_type amba_bustype = {
.name = "amba",
.dev_groups = amba_dev_groups,
.match = amba_match,
.uevent = amba_uevent,
.probe = amba_probe,
.remove = amba_remove,
.shutdown = amba_shutdown,
.dma_configure = amba_dma_configure,
.dma_cleanup = amba_dma_cleanup,
.pm = &amba_pm,
};
EXPORT_SYMBOL_GPL(amba_bustype);
static int __init amba_init(void)
{
return bus_register(&amba_bustype);
}
postcore_initcall(amba_init);
static int amba_proxy_probe(struct amba_device *adev,
const struct amba_id *id)
{
WARN(1, "Stub driver should never match any device.\n");
return -ENODEV;
}
static const struct amba_id amba_stub_drv_ids[] = {
{ 0, 0 },
};
static struct amba_driver amba_proxy_drv = {
.drv = {
.name = "amba-proxy",
},
.probe = amba_proxy_probe,
.id_table = amba_stub_drv_ids,
};
static int __init amba_stub_drv_init(void)
{
if (!IS_ENABLED(CONFIG_MODULES))
return 0;
/*
* The amba_match() function will get called only if there is at least
* one amba driver registered. If all amba drivers are modules and are
* only loaded based on uevents, then we'll hit a chicken-and-egg
* situation where amba_match() is waiting on drivers and drivers are
* waiting on amba_match(). So, register a stub driver to make sure
* amba_match() is called even if no amba driver has been registered.
*/
return amba_driver_register(&amba_proxy_drv);
}
late_initcall_sync(amba_stub_drv_init);
/**
* amba_driver_register - register an AMBA device driver
* @drv: amba device driver structure
*
* Register an AMBA device driver with the Linux device model
* core. If devices pre-exist, the drivers probe function will
* be called.
*/
int amba_driver_register(struct amba_driver *drv)
{
if (!drv->probe)
return -EINVAL;
drv->drv.bus = &amba_bustype;
return driver_register(&drv->drv);
}
EXPORT_SYMBOL(amba_driver_register);
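/* Minimal illustrative client of amba_driver_register() (not part of this
 * file; the driver name, id/mask values and probe body are hypothetical):
 *
 *	static const struct amba_id foo_ids[] = {
 *		{ .id = 0x00041234, .mask = 0x000fffff },
 *		{ 0, 0 },
 *	};
 *
 *	static int foo_probe(struct amba_device *adev, const struct amba_id *id)
 *	{
 *		dev_info(&adev->dev, "bound to periphid %08x\n", adev->periphid);
 *		return 0;
 *	}
 *
 *	static struct amba_driver foo_driver = {
 *		.drv		= { .name = "foo" },
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_amba_driver(foo_driver);
 */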
/**
* amba_driver_unregister - remove an AMBA device driver
* @drv: AMBA device driver structure to remove
*
* Unregister an AMBA device driver from the Linux device
* model. The device model will call the drivers remove function
* for each device the device driver is currently handling.
*/
void amba_driver_unregister(struct amba_driver *drv)
{
driver_unregister(&drv->drv);
}
EXPORT_SYMBOL(amba_driver_unregister);
static void amba_device_release(struct device *dev)
{
struct amba_device *d = to_amba_device(dev);
of_node_put(d->dev.of_node);
if (d->res.parent)
release_resource(&d->res);
mutex_destroy(&d->periphid_lock);
kfree(d);
}
/**
* amba_device_add - add a previously allocated AMBA device structure
* @dev: AMBA device allocated by amba_device_alloc
* @parent: resource parent for this devices resources
*
* Claim the resource, and read the device cell ID if not already
* initialized. Register the AMBA device with the Linux device
* manager.
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
{
int ret;
ret = request_resource(parent, &dev->res);
if (ret)
return ret;
/* If primecell ID isn't hard-coded, figure it out */
if (!dev->periphid) {
/*
* AMBA device uevents require reading its pid and cid
* registers. To do this, the device must be on, clocked and
* out of reset. However in some cases those resources might
* not yet be available. If that's the case, we suppress the
* generation of uevents until we can read the pid and cid
* registers. See also amba_match().
*/
if (amba_read_periphid(dev))
dev_set_uevent_suppress(&dev->dev, true);
}
ret = device_add(&dev->dev);
if (ret)
release_resource(&dev->res);
return ret;
}
EXPORT_SYMBOL_GPL(amba_device_add);
static void amba_device_initialize(struct amba_device *dev, const char *name)
{
device_initialize(&dev->dev);
if (name)
dev_set_name(&dev->dev, "%s", name);
dev->dev.release = amba_device_release;
dev->dev.bus = &amba_bustype;
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
mutex_init(&dev->periphid_lock);
}
/**
* amba_device_alloc - allocate an AMBA device
* @name: sysfs name of the AMBA device
* @base: base of AMBA device
* @size: size of AMBA device
*
* Allocate and initialize an AMBA device structure. Returns %NULL
* on failure.
*/
struct amba_device *amba_device_alloc(const char *name, resource_size_t base,
size_t size)
{
struct amba_device *dev;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev) {
amba_device_initialize(dev, name);
dev->res.start = base;
dev->res.end = base + size - 1;
dev->res.flags = IORESOURCE_MEM;
}
return dev;
}
EXPORT_SYMBOL_GPL(amba_device_alloc);
/**
* amba_device_register - register an AMBA device
* @dev: AMBA device to register
* @parent: parent memory resource
*
* Setup the AMBA device, reading the cell ID if present.
* Claim the resource, and register the AMBA device with
* the Linux device manager.
*/
int amba_device_register(struct amba_device *dev, struct resource *parent)
{
amba_device_initialize(dev, dev->dev.init_name);
dev->dev.init_name = NULL;
return amba_device_add(dev, parent);
}
EXPORT_SYMBOL(amba_device_register);
/**
* amba_device_put - put an AMBA device
* @dev: AMBA device to put
*/
void amba_device_put(struct amba_device *dev)
{
put_device(&dev->dev);
}
EXPORT_SYMBOL_GPL(amba_device_put);
/**
* amba_device_unregister - unregister an AMBA device
* @dev: AMBA device to remove
*
* Remove the specified AMBA device from the Linux device
* manager. All files associated with this object will be
* destroyed, and device drivers notified that the device has
* been removed. The AMBA device's resources including
* the amba_device structure will be freed once all
* references to it have been dropped.
*/
void amba_device_unregister(struct amba_device *dev)
{
device_unregister(&dev->dev);
}
EXPORT_SYMBOL(amba_device_unregister);
/**
* amba_request_regions - request all mem regions associated with device
* @dev: amba_device structure for device
* @name: name, or NULL to use driver name
*/
int amba_request_regions(struct amba_device *dev, const char *name)
{
int ret = 0;
u32 size;
if (!name)
name = dev->dev.driver->name;
size = resource_size(&dev->res);
if (!request_mem_region(dev->res.start, size, name))
ret = -EBUSY;
return ret;
}
EXPORT_SYMBOL(amba_request_regions);
/**
* amba_release_regions - release mem regions associated with device
* @dev: amba_device structure for device
*
* Release regions claimed by a successful call to amba_request_regions.
*/
void amba_release_regions(struct amba_device *dev)
{
u32 size;
size = resource_size(&dev->res);
release_mem_region(dev->res.start, size);
}
EXPORT_SYMBOL(amba_release_regions);
|
linux-master
|
drivers/amba/bus.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2011 Google, Inc.
*
* Author:
* Jay Cheng <[email protected]>
* James Wylder <[email protected]>
* Benoit Goby <[email protected]>
* Colin Cross <[email protected]>
* Hiroshi DOYU <[email protected]>
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <soc/tegra/ahb.h>
#define DRV_NAME "tegra-ahb"
#define AHB_ARBITRATION_DISABLE 0x04
#define AHB_ARBITRATION_PRIORITY_CTRL 0x08
#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
#define PRIORITY_SELECT_USB BIT(6)
#define PRIORITY_SELECT_USB2 BIT(18)
#define PRIORITY_SELECT_USB3 BIT(17)
#define AHB_GIZMO_AHB_MEM 0x10
#define ENB_FAST_REARBITRATE BIT(2)
#define DONT_SPLIT_AHB_WR BIT(7)
#define AHB_GIZMO_APB_DMA 0x14
#define AHB_GIZMO_IDE 0x1c
#define AHB_GIZMO_USB 0x20
#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x24
#define AHB_GIZMO_CPU_AHB_BRIDGE 0x28
#define AHB_GIZMO_COP_AHB_BRIDGE 0x2c
#define AHB_GIZMO_XBAR_APB_CTLR 0x30
#define AHB_GIZMO_VCP_AHB_BRIDGE 0x34
#define AHB_GIZMO_NAND 0x40
#define AHB_GIZMO_SDMMC4 0x48
#define AHB_GIZMO_XIO 0x4c
#define AHB_GIZMO_BSEV 0x64
#define AHB_GIZMO_BSEA 0x74
#define AHB_GIZMO_NOR 0x78
#define AHB_GIZMO_USB2 0x7c
#define AHB_GIZMO_USB3 0x80
#define IMMEDIATE BIT(18)
#define AHB_GIZMO_SDMMC1 0x84
#define AHB_GIZMO_SDMMC2 0x88
#define AHB_GIZMO_SDMMC3 0x8c
#define AHB_MEM_PREFETCH_CFG_X 0xdc
#define AHB_ARBITRATION_XBAR_CTRL 0xe0
#define AHB_MEM_PREFETCH_CFG3 0xe4
#define AHB_MEM_PREFETCH_CFG4 0xe8
#define AHB_MEM_PREFETCH_CFG1 0xf0
#define AHB_MEM_PREFETCH_CFG2 0xf4
#define PREFETCH_ENB BIT(31)
#define MST_ID(x) (((x) & 0x1f) << 26)
#define AHBDMA_MST_ID MST_ID(5)
#define USB_MST_ID MST_ID(6)
#define USB2_MST_ID MST_ID(18)
#define USB3_MST_ID MST_ID(17)
#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xfc
#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
/*
* INCORRECT_BASE_ADDR_LOW_BYTE: Legacy kernel DT files for Tegra SoCs
* prior to Tegra124 generally use a physical base address ending in
* 0x4 for the AHB IP block. According to the TRM, the low byte
* should be 0x0. During device probing, this macro is used to detect
* whether the passed-in physical address is incorrect, and if so, to
* correct it.
*/
#define INCORRECT_BASE_ADDR_LOW_BYTE 0x4
static struct platform_driver tegra_ahb_driver;
static const u32 tegra_ahb_gizmo[] = {
AHB_ARBITRATION_DISABLE,
AHB_ARBITRATION_PRIORITY_CTRL,
AHB_GIZMO_AHB_MEM,
AHB_GIZMO_APB_DMA,
AHB_GIZMO_IDE,
AHB_GIZMO_USB,
AHB_GIZMO_AHB_XBAR_BRIDGE,
AHB_GIZMO_CPU_AHB_BRIDGE,
AHB_GIZMO_COP_AHB_BRIDGE,
AHB_GIZMO_XBAR_APB_CTLR,
AHB_GIZMO_VCP_AHB_BRIDGE,
AHB_GIZMO_NAND,
AHB_GIZMO_SDMMC4,
AHB_GIZMO_XIO,
AHB_GIZMO_BSEV,
AHB_GIZMO_BSEA,
AHB_GIZMO_NOR,
AHB_GIZMO_USB2,
AHB_GIZMO_USB3,
AHB_GIZMO_SDMMC1,
AHB_GIZMO_SDMMC2,
AHB_GIZMO_SDMMC3,
AHB_MEM_PREFETCH_CFG_X,
AHB_ARBITRATION_XBAR_CTRL,
AHB_MEM_PREFETCH_CFG3,
AHB_MEM_PREFETCH_CFG4,
AHB_MEM_PREFETCH_CFG1,
AHB_MEM_PREFETCH_CFG2,
AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
};
struct tegra_ahb {
void __iomem *regs;
struct device *dev;
u32 ctx[];
};
static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
{
return readl(ahb->regs + offset);
}
static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
{
writel(value, ahb->regs + offset);
}
#ifdef CONFIG_TEGRA_IOMMU_SMMU
int tegra_ahb_enable_smmu(struct device_node *dn)
{
struct device *dev;
u32 val;
struct tegra_ahb *ahb;
dev = driver_find_device_by_of_node(&tegra_ahb_driver.driver, dn);
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
return 0;
}
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
static int __maybe_unused tegra_ahb_suspend(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
return 0;
}
static int __maybe_unused tegra_ahb_resume(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
tegra_ahb_resume, NULL);
static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
{
u32 val;
val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
val = gizmo_readl(ahb, AHB_GIZMO_USB);
val |= IMMEDIATE;
gizmo_writel(ahb, val, AHB_GIZMO_USB);
val = gizmo_readl(ahb, AHB_GIZMO_USB2);
val |= IMMEDIATE;
gizmo_writel(ahb, val, AHB_GIZMO_USB2);
val = gizmo_readl(ahb, AHB_GIZMO_USB3);
val |= IMMEDIATE;
gizmo_writel(ahb, val, AHB_GIZMO_USB3);
val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
val |= PRIORITY_SELECT_USB |
PRIORITY_SELECT_USB2 |
PRIORITY_SELECT_USB3 |
AHB_PRIORITY_WEIGHT(7);
gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
val &= ~MST_ID(~0);
val |= PREFETCH_ENB |
AHBDMA_MST_ID |
ADDR_BNDRY(0xc) |
INACTIVITY_TIMEOUT(0x1000);
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
val &= ~MST_ID(~0);
val |= PREFETCH_ENB |
USB_MST_ID |
ADDR_BNDRY(0xc) |
INACTIVITY_TIMEOUT(0x1000);
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
val &= ~MST_ID(~0);
val |= PREFETCH_ENB |
USB3_MST_ID |
ADDR_BNDRY(0xc) |
INACTIVITY_TIMEOUT(0x1000);
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
val &= ~MST_ID(~0);
val |= PREFETCH_ENB |
USB2_MST_ID |
ADDR_BNDRY(0xc) |
INACTIVITY_TIMEOUT(0x1000);
gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
}
static int tegra_ahb_probe(struct platform_device *pdev)
{
struct resource *res;
struct tegra_ahb *ahb;
size_t bytes;
bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
if (!ahb)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Correct the IP block base address if necessary */
if (res &&
(res->start & INCORRECT_BASE_ADDR_LOW_BYTE) ==
INCORRECT_BASE_ADDR_LOW_BYTE) {
dev_warn(&pdev->dev, "incorrect AHB base address in DT data - enabling workaround\n");
res->start -= INCORRECT_BASE_ADDR_LOW_BYTE;
}
ahb->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ahb->regs))
return PTR_ERR(ahb->regs);
ahb->dev = &pdev->dev;
platform_set_drvdata(pdev, ahb);
tegra_ahb_gizmo_init(ahb);
return 0;
}
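/* Example of the workaround above (address is illustrative): a legacy DT
 * node describing the AHB block at 0x6000c004 is adjusted to 0x6000c000
 * before the region is mapped, matching the TRM-documented base.
 */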
static const struct of_device_id tegra_ahb_of_match[] = {
{ .compatible = "nvidia,tegra30-ahb", },
{ .compatible = "nvidia,tegra20-ahb", },
{},
};
static struct platform_driver tegra_ahb_driver = {
.probe = tegra_ahb_probe,
.driver = {
.name = DRV_NAME,
.of_match_table = tegra_ahb_of_match,
.pm = &tegra_ahb_pm,
},
};
module_platform_driver(tegra_ahb_driver);
MODULE_AUTHOR("Hiroshi DOYU <[email protected]>");
MODULE_DESCRIPTION("Tegra AHB driver");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/amba/tegra-ahb.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Derived from Applicom driver ac.c for SCO Unix */
/* Ported by David Woodhouse, Axiom (Cambridge) Ltd. */
/* [email protected] 30/8/98 */
/* $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $ */
/* This module is for Linux 2.1 and 2.2 series kernels. */
/*****************************************************************************/
/* J PAGET 18/02/94: moved to V2.4.2 - ioctl code 2 resets the interrupts, */
/* so that the board recovers correctly after an unclean exit */
/* J PAGET 02/05/94: moved to V2.4.3 - in the interrupt handler, */
/* LoopCount was not initialised to 0. */
/* F LAFORSE 04/07/95: version V2.6.0 - dummy read after accessing a board */
/* in order to release the bus */
/* J.PAGET 19/11/95: version V2.6.1 - the board count, address and irq are */
/* no longer configured and passed as arguments to acinit, but are probed */
/* on the bus to adapt to the number of boards present. IOCTL code 6 */
/* displayed V2.4.3 */
/* F.LAFORSE 28/11/95: created acXX.o files with the different base */
/* addresses of the boards; IOCTL 6 is more complete */
/* J.PAGET 19/08/96: copied version V2.6 to V2.8.0 with no code change */
/* other than the V2.6.1 text becoming V2.8.0 */
/*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/nospec.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "applicom.h"
/* NOTE: We use for loops with {write,read}b() instead of
memcpy_{from,to}io throughout this driver. This is because
the board doesn't correctly handle word accesses - only
bytes.
*/
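/*
 * Illustration (a minimal sketch, not part of the original driver): the
 * byte-wise copy idiom used throughout this file looks like
 *
 *	for (c = 0; c < sizeof(struct mailbox); c++)
 *		writeb(src[c], board_ram + c);
 *
 * where a plain memcpy_toio() could issue word-sized bus cycles that the
 * board cannot handle.
 */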
#undef DEBUG
#define MAX_BOARD 8 /* maximum number of PC boards possible */
#define MAX_ISA_BOARD 4
#define LEN_RAM_IO 0x800
#ifndef PCI_VENDOR_ID_APPLICOM
#define PCI_VENDOR_ID_APPLICOM 0x1389
#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#endif
static DEFINE_MUTEX(ac_mutex);
static char *applicom_pci_devnames[] = {
"PCI board",
"PCI2000IBS / PCI2000CAN",
"PCI2000PFB"
};
static const struct pci_device_id applicom_pci_tbl[] = {
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, applicom_pci_tbl);
MODULE_AUTHOR("David Woodhouse & Applicom International");
MODULE_DESCRIPTION("Driver for Applicom Profibus card");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(AC_MINOR);
static struct applicom_board {
unsigned long PhysIO;
void __iomem *RamIO;
wait_queue_head_t FlagSleepSend;
long irq;
spinlock_t mutex;
} apbs[MAX_BOARD];
static unsigned int irq; /* interrupt number IRQ */
static unsigned long mem; /* physical segment of board */
module_param_hw(irq, uint, irq, 0);
MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
module_param_hw(mem, ulong, iomem, 0);
MODULE_PARM_DESC(mem, "Shared Memory Address of Applicom board");
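/*
 * Usage illustration (hypothetical values, assuming the module is built
 * as applicom.ko): for an ISA board mapped at segment 0xd0000 on IRQ 10,
 * load the driver with
 *	modprobe applicom mem=0xd0000 irq=10
 * PCI boards are probed automatically and need no parameters.
 */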
static unsigned int numboards; /* number of installed boards */
static volatile unsigned char Dummy; /* sink for the dummy reads that release the board bus */
static DECLARE_WAIT_QUEUE_HEAD(FlagSleepRec);
static unsigned int WriteErrorCount; /* number of write errors */
static unsigned int ReadErrorCount; /* number of read errors */
static unsigned int DeviceErrorCount; /* number of device errors */
static ssize_t ac_read (struct file *, char __user *, size_t, loff_t *);
static ssize_t ac_write (struct file *, const char __user *, size_t, loff_t *);
static long ac_ioctl(struct file *, unsigned int, unsigned long);
static irqreturn_t ac_interrupt(int, void *);
static const struct file_operations ac_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = ac_read,
.write = ac_write,
.unlocked_ioctl = ac_ioctl,
};
static struct miscdevice ac_miscdev = {
AC_MINOR,
"ac",
&ac_fops
};
static int dummy; /* dev_id for request_irq() */
static int ac_register_board(unsigned long physloc, void __iomem *loc,
unsigned char boardno)
{
volatile unsigned char byte_reset_it;
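/* The board is recognised by the 00/55/AA/FF signature at CONF_END_TEST. */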
if((readb(loc + CONF_END_TEST) != 0x00) ||
(readb(loc + CONF_END_TEST + 1) != 0x55) ||
(readb(loc + CONF_END_TEST + 2) != 0xAA) ||
(readb(loc + CONF_END_TEST + 3) != 0xFF))
return 0;
if (!boardno)
boardno = readb(loc + NUMCARD_OWNER_TO_PC);
if (!boardno || boardno > MAX_BOARD) {
printk(KERN_WARNING "Board #%d (at 0x%lx) is out of range (1 <= x <= %d).\n",
boardno, physloc, MAX_BOARD);
return 0;
}
if (apbs[boardno - 1].RamIO) {
printk(KERN_WARNING "Board #%d (at 0x%lx) conflicts with previous board #%d (at 0x%lx)\n",
boardno, physloc, boardno, apbs[boardno-1].PhysIO);
return 0;
}
boardno--;
apbs[boardno].PhysIO = physloc;
apbs[boardno].RamIO = loc;
init_waitqueue_head(&apbs[boardno].FlagSleepSend);
spin_lock_init(&apbs[boardno].mutex);
byte_reset_it = readb(loc + RAM_IT_TO_PC);
numboards++;
return boardno + 1;
}
static void __exit applicom_exit(void)
{
unsigned int i;
misc_deregister(&ac_miscdev);
for (i = 0; i < MAX_BOARD; i++) {
if (!apbs[i].RamIO)
continue;
if (apbs[i].irq)
free_irq(apbs[i].irq, &dummy);
iounmap(apbs[i].RamIO);
}
}
static int __init applicom_init(void)
{
int i, numisa = 0;
struct pci_dev *dev = NULL;
void __iomem *RamIO;
int boardno, ret;
printk(KERN_INFO "Applicom driver: $Id: ac.c,v 1.30 2000/03/22 16:03:57 dwmw2 Exp $\n");
/* No mem and irq given - check for a PCI card */
while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) {
if (!pci_match_id(applicom_pci_tbl, dev))
continue;
if (pci_enable_device(dev)) {
pci_dev_put(dev);
return -EIO;
}
RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
"space at 0x%llx\n",
(unsigned long long)pci_resource_start(dev, 0));
pci_disable_device(dev);
pci_dev_put(dev);
return -EIO;
}
printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n",
applicom_pci_devnames[dev->device-1],
(unsigned long long)pci_resource_start(dev, 0),
dev->irq);
boardno = ac_register_board(pci_resource_start(dev, 0),
RamIO, 0);
if (!boardno) {
printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n");
iounmap(RamIO);
pci_disable_device(dev);
continue;
}
if (request_irq(dev->irq, &ac_interrupt, IRQF_SHARED, "Applicom PCI", &dummy)) {
printk(KERN_INFO "Could not allocate IRQ %d for PCI Applicom device.\n", dev->irq);
iounmap(RamIO);
pci_disable_device(dev);
apbs[boardno - 1].RamIO = NULL;
continue;
}
/* Enable interrupts. */
writeb(0x40, apbs[boardno - 1].RamIO + RAM_IT_FROM_PC);
apbs[boardno - 1].irq = dev->irq;
}
/* Finished with PCI cards. If none registered,
* and there was no mem/irq specified, exit */
if (!mem || !irq) {
if (numboards)
goto fin;
else {
printk(KERN_INFO "ac.o: No PCI boards found.\n");
printk(KERN_INFO "ac.o: For an ISA board you must supply memory and irq parameters.\n");
return -ENXIO;
}
}
/* Now try the specified ISA cards */
for (i = 0; i < MAX_ISA_BOARD; i++) {
RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
if (!RamIO) {
printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
continue;
}
if (!(boardno = ac_register_board((unsigned long)mem+ (LEN_RAM_IO*i),
RamIO,i+1))) {
iounmap(RamIO);
continue;
}
printk(KERN_NOTICE "Applicom ISA card found at mem 0x%lx, irq %d\n", mem + (LEN_RAM_IO*i), irq);
if (!numisa) {
if (request_irq(irq, &ac_interrupt, IRQF_SHARED, "Applicom ISA", &dummy)) {
printk(KERN_WARNING "Could not allocate IRQ %d for ISA Applicom device.\n", irq);
iounmap(RamIO);
apbs[boardno - 1].RamIO = NULL;
}
else
apbs[boardno - 1].irq = irq;
}
else
apbs[boardno - 1].irq = 0;
numisa++;
}
if (!numisa)
printk(KERN_WARNING "ac.o: No valid ISA Applicom boards found "
"at mem 0x%lx\n", mem);
fin:
init_waitqueue_head(&FlagSleepRec);
WriteErrorCount = 0;
ReadErrorCount = 0;
DeviceErrorCount = 0;
if (numboards) {
ret = misc_register(&ac_miscdev);
if (ret) {
printk(KERN_WARNING "ac.o: Unable to register misc device\n");
goto out;
}
for (i = 0; i < MAX_BOARD; i++) {
int serial;
char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
if (!apbs[i].RamIO)
continue;
for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
boardname[serial] = 0;
printk(KERN_INFO "Applicom board %d: %s, PROM V%d.%d",
i+1, boardname,
(int)(readb(apbs[i].RamIO + VERS) >> 4),
(int)(readb(apbs[i].RamIO + VERS) & 0xF));
serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
if (serial != 0)
printk(" S/N %d\n", serial);
else
printk("\n");
}
return 0;
}
else
return -ENXIO;
out:
for (i = 0; i < MAX_BOARD; i++) {
if (!apbs[i].RamIO)
continue;
if (apbs[i].irq)
free_irq(apbs[i].irq, &dummy);
iounmap(apbs[i].RamIO);
}
return ret;
}
module_init(applicom_init);
module_exit(applicom_exit);
static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, loff_t * ppos)
{
unsigned int NumCard; /* Board number 1 -> 8 */
unsigned int IndexCard; /* Index board number 0 -> 7 */
unsigned char TicCard; /* Board TIC to send */
unsigned long flags; /* saved IRQ flags */
struct st_ram_io st_loc;
struct mailbox tmpmailbox;
#ifdef DEBUG
int c;
#endif
DECLARE_WAITQUEUE(wait, current);
if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
static int warncount = 5;
if (warncount) {
printk(KERN_INFO "Hmmm. write() of Applicom card, length %zd != expected %zd\n",
count, sizeof(struct st_ram_io) + sizeof(struct mailbox));
warncount--;
}
return -EINVAL;
}
if(copy_from_user(&st_loc, buf, sizeof(struct st_ram_io)))
return -EFAULT;
if(copy_from_user(&tmpmailbox, &buf[sizeof(struct st_ram_io)],
sizeof(struct mailbox)))
return -EFAULT;
NumCard = st_loc.num_card; /* board number to send */
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
if (IndexCard >= MAX_BOARD)
return -EINVAL;
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
printk("Write to applicom card #%d. struct st_ram_io follows:",
IndexCard+1);
for (c = 0; c < sizeof(struct st_ram_io);) {
printk("\n%5.5X: %2.2X", c, ((unsigned char *) &st_loc)[c]);
for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
printk(" %2.2X", ((unsigned char *) &st_loc)[c]);
}
}
printk("\nstruct mailbox follows:");
for (c = 0; c < sizeof(struct mailbox);) {
printk("\n%5.5X: %2.2X", c, ((unsigned char *) &tmpmailbox)[c]);
for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
printk(" %2.2X", ((unsigned char *) &tmpmailbox)[c]);
}
}
printk("\n");
#endif
spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
/* Check that the data-ready byte has a valid value */
if(readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) > 2) {
Dummy = readb(apbs[IndexCard].RamIO + VERS);
spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
printk(KERN_WARNING "APPLICOM driver write error board %d, DataFromPcReady = %d\n",
IndexCard,(int)readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY));
DeviceErrorCount++;
return -EIO;
}
/* Place ourselves on the wait queue */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
/* Check whether the card is ready for us */
while (readb(apbs[IndexCard].RamIO + DATA_FROM_PC_READY) != 0) {
Dummy = readb(apbs[IndexCard].RamIO + VERS);
/* It's busy. Sleep. */
spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
schedule();
if (signal_pending(current)) {
remove_wait_queue(&apbs[IndexCard].FlagSleepSend,
&wait);
return -EINTR;
}
spin_lock_irqsave(&apbs[IndexCard].mutex, flags);
set_current_state(TASK_INTERRUPTIBLE);
}
/* We may not have actually slept */
set_current_state(TASK_RUNNING);
remove_wait_queue(&apbs[IndexCard].FlagSleepSend, &wait);
writeb(1, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
/* Which is best - lock down the pages with rawio and then
copy directly, or use bounce buffers? For now we do the latter
because it works with 2.2 still */
{
unsigned char *from = (unsigned char *) &tmpmailbox;
void __iomem *to = apbs[IndexCard].RamIO + RAM_FROM_PC;
int c;
for (c = 0; c < sizeof(struct mailbox); c++)
writeb(*(from++), to++);
}
writeb(0x20, apbs[IndexCard].RamIO + TIC_OWNER_FROM_PC);
writeb(0xff, apbs[IndexCard].RamIO + NUMCARD_OWNER_FROM_PC);
writeb(TicCard, apbs[IndexCard].RamIO + TIC_DES_FROM_PC);
writeb(NumCard, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
writeb(2, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
Dummy = readb(apbs[IndexCard].RamIO + VERS);
spin_unlock_irqrestore(&apbs[IndexCard].mutex, flags);
return 0;
}
static int do_ac_read(int IndexCard, char __user *buf,
struct st_ram_io *st_loc, struct mailbox *mailbox)
{
void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC;
unsigned char *to = (unsigned char *)mailbox;
#ifdef DEBUG
int c;
#endif
st_loc->tic_owner_to_pc = readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC);
st_loc->numcard_owner_to_pc = readb(apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
{
int c;
for (c = 0; c < sizeof(struct mailbox); c++)
*(to++) = readb(from++);
}
writeb(1, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
writeb(1, apbs[IndexCard].RamIO + TYP_ACK_FROM_PC);
writeb(IndexCard+1, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
writeb(readb(apbs[IndexCard].RamIO + TIC_OWNER_TO_PC),
apbs[IndexCard].RamIO + TIC_ACK_FROM_PC);
writeb(2, apbs[IndexCard].RamIO + ACK_FROM_PC_READY);
writeb(0, apbs[IndexCard].RamIO + DATA_TO_PC_READY);
writeb(2, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
Dummy = readb(apbs[IndexCard].RamIO + VERS);
#ifdef DEBUG
printk("Read from applicom card #%d. struct st_ram_io follows:", NumCard);
for (c = 0; c < sizeof(struct st_ram_io);) {
printk("\n%5.5X: %2.2X", c, ((unsigned char *)st_loc)[c]);
for (c++; c % 8 && c < sizeof(struct st_ram_io); c++) {
printk(" %2.2X", ((unsigned char *)st_loc)[c]);
}
}
printk("\nstruct mailbox follows:");
for (c = 0; c < sizeof(struct mailbox);) {
printk("\n%5.5X: %2.2X", c, ((unsigned char *)mailbox)[c]);
for (c++; c % 8 && c < sizeof(struct mailbox); c++) {
printk(" %2.2X", ((unsigned char *)mailbox)[c]);
}
}
printk("\n");
#endif
return (sizeof(struct st_ram_io) + sizeof(struct mailbox));
}
static ssize_t ac_read (struct file *filp, char __user *buf, size_t count, loff_t *ptr)
{
unsigned long flags;
unsigned int i;
unsigned char tmp;
int ret = 0;
DECLARE_WAITQUEUE(wait, current);
#ifdef DEBUG
int loopcount=0;
#endif
/* No need to ratelimit this. Only root can trigger it anyway */
if (count != sizeof(struct st_ram_io) + sizeof(struct mailbox)) {
printk( KERN_WARNING "Hmmm. read() of Applicom card, length %zd != expected %zd\n",
count,sizeof(struct st_ram_io) + sizeof(struct mailbox));
return -EINVAL;
}
while(1) {
/* Stick ourself on the wait queue */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&FlagSleepRec, &wait);
/* Scan each board, looking for one which has a packet for us */
for (i=0; i < MAX_BOARD; i++) {
if (!apbs[i].RamIO)
continue;
spin_lock_irqsave(&apbs[i].mutex, flags);
tmp = readb(apbs[i].RamIO + DATA_TO_PC_READY);
if (tmp == 2) {
struct st_ram_io st_loc;
struct mailbox mailbox;
/* Got a packet for us */
memset(&st_loc, 0, sizeof(st_loc));
ret = do_ac_read(i, buf, &st_loc, &mailbox);
spin_unlock_irqrestore(&apbs[i].mutex, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&FlagSleepRec, &wait);
if (copy_to_user(buf, &st_loc, sizeof(st_loc)))
return -EFAULT;
if (copy_to_user(buf + sizeof(st_loc), &mailbox, sizeof(mailbox)))
return -EFAULT;
return tmp;
}
if (tmp > 2) {
/* Got an error */
Dummy = readb(apbs[i].RamIO + VERS);
spin_unlock_irqrestore(&apbs[i].mutex, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&FlagSleepRec, &wait);
printk(KERN_WARNING "APPLICOM driver read error board %d, DataToPcReady = %d\n",
i,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
DeviceErrorCount++;
return -EIO;
}
/* Nothing for us. Try the next board */
Dummy = readb(apbs[i].RamIO + VERS);
spin_unlock_irqrestore(&apbs[i].mutex, flags);
} /* per board */
/* OK - No boards had data for us. Sleep now */
schedule();
remove_wait_queue(&FlagSleepRec, &wait);
if (signal_pending(current))
return -EINTR;
#ifdef DEBUG
if (loopcount++ > 2) {
printk(KERN_DEBUG "Looping in ac_read. loopcount %d\n", loopcount);
}
#endif
}
}
static irqreturn_t ac_interrupt(int vec, void *dev_instance)
{
unsigned int i;
unsigned int FlagInt;
unsigned int LoopCount;
int handled = 0;
// printk("Applicom interrupt on IRQ %d occurred\n", vec);
LoopCount = 0;
do {
FlagInt = 0;
for (i = 0; i < MAX_BOARD; i++) {
/* Skip if this board doesn't exist */
if (!apbs[i].RamIO)
continue;
spin_lock(&apbs[i].mutex);
/* Skip if this board doesn't want attention */
if(readb(apbs[i].RamIO + RAM_IT_TO_PC) == 0) {
spin_unlock(&apbs[i].mutex);
continue;
}
handled = 1;
FlagInt = 1;
writeb(0, apbs[i].RamIO + RAM_IT_TO_PC);
if (readb(apbs[i].RamIO + DATA_TO_PC_READY) > 2) {
printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataToPcReady = %d\n",
i+1,(int)readb(apbs[i].RamIO + DATA_TO_PC_READY));
DeviceErrorCount++;
}
if((readb(apbs[i].RamIO + DATA_FROM_PC_READY) > 2) &&
(readb(apbs[i].RamIO + DATA_FROM_PC_READY) != 6)) {
printk(KERN_WARNING "APPLICOM driver interrupt err board %d, DataFromPcReady = %d\n",
i+1,(int)readb(apbs[i].RamIO + DATA_FROM_PC_READY));
DeviceErrorCount++;
}
if (readb(apbs[i].RamIO + DATA_TO_PC_READY) == 2) { /* mailbox sent by the card ? */
if (waitqueue_active(&FlagSleepRec)) {
wake_up_interruptible(&FlagSleepRec);
}
}
if (readb(apbs[i].RamIO + DATA_FROM_PC_READY) == 0) { /* RAM I/O free for a write from the PC? */
if (waitqueue_active(&apbs[i].FlagSleepSend)) { /* is a process sleeping in ac_write()? */
wake_up_interruptible(&apbs[i].FlagSleepSend);
}
}
Dummy = readb(apbs[i].RamIO + VERS);
if(readb(apbs[i].RamIO + RAM_IT_TO_PC)) {
/* There's another int waiting on this card */
spin_unlock(&apbs[i].mutex);
i--;
} else {
spin_unlock(&apbs[i].mutex);
}
}
if (FlagInt)
LoopCount = 0;
else
LoopCount++;
} while(LoopCount < 2);
return IRQ_RETVAL(handled);
}
static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{ /* @ ADG or ATO depending on the case */
int i;
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
/* In general, the device is only openable by root anyway, so we're not
particularly concerned that bogus ioctls can flood the console. */
adgl = memdup_user(argp, sizeof(struct st_ram_io));
if (IS_ERR(adgl))
return PTR_ERR(adgl);
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
if (cmd != 6 && IndexCard >= MAX_BOARD)
goto err;
IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
if (cmd != 6 && !apbs[IndexCard].RamIO)
goto err;
switch (cmd) {
case 0:
pmem = apbs[IndexCard].RamIO;
for (i = 0; i < sizeof(struct st_ram_io); i++)
((unsigned char *)adgl)[i]=readb(pmem++);
if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
ret = -EFAULT;
break;
case 1:
pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
for (i = 0; i < 4; i++)
adgl->conf_end_test[i] = readb(pmem++);
for (i = 0; i < 2; i++)
adgl->error_code[i] = readb(pmem++);
for (i = 0; i < 4; i++)
adgl->parameter_error[i] = readb(pmem++);
pmem = apbs[IndexCard].RamIO + VERS;
adgl->vers = readb(pmem);
pmem = apbs[IndexCard].RamIO + TYPE_CARD;
for (i = 0; i < 20; i++)
adgl->reserv1[i] = readb(pmem++);
*(int *)&adgl->reserv1[20] =
(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER) << 16) +
(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 1) << 8) +
(readb(apbs[IndexCard].RamIO + SERIAL_NUMBER + 2) );
if (copy_to_user(argp, adgl, sizeof(struct st_ram_io)))
ret = -EFAULT;
break;
case 2:
pmem = apbs[IndexCard].RamIO + CONF_END_TEST;
for (i = 0; i < 10; i++)
writeb(0xff, pmem++);
writeb(adgl->data_from_pc_ready,
apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
for (i = 0; i < MAX_BOARD; i++) {
if (apbs[i].RamIO) {
byte_reset_it = readb(apbs[i].RamIO + RAM_IT_TO_PC);
}
}
break;
case 3:
pmem = apbs[IndexCard].RamIO + TIC_DES_FROM_PC;
writeb(adgl->tic_des_from_pc, pmem);
break;
case 4:
pmem = apbs[IndexCard].RamIO + TIC_OWNER_TO_PC;
adgl->tic_owner_to_pc = readb(pmem++);
adgl->numcard_owner_to_pc = readb(pmem);
if (copy_to_user(argp, adgl,sizeof(struct st_ram_io)))
ret = -EFAULT;
break;
case 5:
writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_OWNER_TO_PC);
writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_DES_FROM_PC);
writeb(adgl->num_card, apbs[IndexCard].RamIO + NUMCARD_ACK_FROM_PC);
writeb(4, apbs[IndexCard].RamIO + DATA_FROM_PC_READY);
writeb(1, apbs[IndexCard].RamIO + RAM_IT_FROM_PC);
break;
case 6:
printk(KERN_INFO "APPLICOM driver release .... V2.8.0 ($Revision: 1.30 $)\n");
printk(KERN_INFO "Number of installed boards . %d\n", (int) numboards);
printk(KERN_INFO "Segment of board ........... %X\n", (int) mem);
printk(KERN_INFO "Interrupt IRQ number ....... %d\n", (int) irq);
for (i = 0; i < MAX_BOARD; i++) {
int serial;
char boardname[(SERIAL_NUMBER - TYPE_CARD) + 1];
if (!apbs[i].RamIO)
continue;
for (serial = 0; serial < SERIAL_NUMBER - TYPE_CARD; serial++)
boardname[serial] = readb(apbs[i].RamIO + TYPE_CARD + serial);
boardname[serial] = 0;
printk(KERN_INFO "Prom version board %d ....... V%d.%d %s",
i+1,
(int)(readb(apbs[i].RamIO + VERS) >> 4),
(int)(readb(apbs[i].RamIO + VERS) & 0xF),
boardname);
serial = (readb(apbs[i].RamIO + SERIAL_NUMBER) << 16) +
(readb(apbs[i].RamIO + SERIAL_NUMBER + 1) << 8) +
(readb(apbs[i].RamIO + SERIAL_NUMBER + 2) );
if (serial != 0)
printk(" S/N %d\n", serial);
else
printk("\n");
}
if (DeviceErrorCount != 0)
printk(KERN_INFO "DeviceErrorCount ........... %d\n", DeviceErrorCount);
if (ReadErrorCount != 0)
printk(KERN_INFO "ReadErrorCount ............. %d\n", ReadErrorCount);
if (WriteErrorCount != 0)
printk(KERN_INFO "WriteErrorCount ............ %d\n", WriteErrorCount);
if (waitqueue_active(&FlagSleepRec))
printk(KERN_INFO "Process in read pending\n");
for (i = 0; i < MAX_BOARD; i++) {
if (apbs[i].RamIO && waitqueue_active(&apbs[i].FlagSleepSend))
printk(KERN_INFO "Process in write pending board %d\n",i+1);
}
break;
default:
ret = -ENOTTY;
break;
}
Dummy = readb(apbs[IndexCard].RamIO + VERS);
kfree(adgl);
mutex_unlock(&ac_mutex);
return ret;
err:
if (warncount) {
pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
(int)IndexCard + 1);
warncount--;
}
kfree(adgl);
mutex_unlock(&ac_mutex);
return -EINVAL;
}
| linux-master | drivers/char/applicom.c |
/*
* Telecom Clock driver for Intel NetStructure(tm) MPCBL0010
*
* Copyright (C) 2005 Kontron Canada
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <[email protected]> and the current
* Maintainer <[email protected]>
*
* Description : This is the TELECOM CLOCK module driver for the ATCA
* MPCBL0010 ATCA computer.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/sysfs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <asm/io.h> /* inb/outb */
#include <linux/uaccess.h>
MODULE_AUTHOR("Sebastien Bouchard <[email protected]>");
MODULE_LICENSE("GPL");
/*Hardware Reset of the PLL */
#define RESET_ON 0x00
#define RESET_OFF 0x01
/* MODE SELECT */
#define NORMAL_MODE 0x00
#define HOLDOVER_MODE 0x10
#define FREERUN_MODE 0x20
/* FILTER SELECT */
#define FILTER_6HZ 0x04
#define FILTER_12HZ 0x00
/* SELECT REFERENCE FREQUENCY */
#define REF_CLK1_8kHz 0x00
#define REF_CLK2_19_44MHz 0x02
/* Select primary or secondary redundant clock */
#define PRIMARY_CLOCK 0x00
#define SECONDARY_CLOCK 0x01
/* CLOCK TRANSMISSION DEFINE */
#define CLK_8kHz 0xff
#define CLK_16_384MHz 0xfb
#define CLK_1_544MHz 0x00
#define CLK_2_048MHz 0x01
#define CLK_4_096MHz 0x02
#define CLK_6_312MHz 0x03
#define CLK_8_192MHz 0x04
#define CLK_19_440MHz 0x06
#define CLK_8_592MHz 0x08
#define CLK_11_184MHz 0x09
#define CLK_34_368MHz 0x0b
#define CLK_44_736MHz 0x0a
/* RECEIVED REFERENCE */
#define AMC_B1 0
#define AMC_B2 1
/* HARDWARE SWITCHING DEFINE */
#define HW_ENABLE 0x80
#define HW_DISABLE 0x00
/* HARDWARE SWITCHING MODE DEFINE */
#define PLL_HOLDOVER 0x40
#define LOST_CLOCK 0x00
/* ALARMS DEFINE */
#define UNLOCK_MASK 0x10
#define HOLDOVER_MASK 0x20
#define SEC_LOST_MASK 0x40
#define PRI_LOST_MASK 0x80
/* INTERRUPT CAUSE DEFINE */
#define PRI_LOS_01_MASK 0x01
#define PRI_LOS_10_MASK 0x02
#define SEC_LOS_01_MASK 0x04
#define SEC_LOS_10_MASK 0x08
#define HOLDOVER_01_MASK 0x10
#define HOLDOVER_10_MASK 0x20
#define UNLOCK_01_MASK 0x40
#define UNLOCK_10_MASK 0x80
struct tlclk_alarms {
__u32 lost_clocks;
__u32 lost_primary_clock;
__u32 lost_secondary_clock;
__u32 primary_clock_back;
__u32 secondary_clock_back;
__u32 switchover_primary;
__u32 switchover_secondary;
__u32 pll_holdover;
__u32 pll_end_holdover;
__u32 pll_lost_sync;
__u32 pll_sync;
};
/* Telecom clock I/O register definition */
#define TLCLK_BASE 0xa08
#define TLCLK_REG0 TLCLK_BASE
#define TLCLK_REG1 (TLCLK_BASE+1)
#define TLCLK_REG2 (TLCLK_BASE+2)
#define TLCLK_REG3 (TLCLK_BASE+3)
#define TLCLK_REG4 (TLCLK_BASE+4)
#define TLCLK_REG5 (TLCLK_BASE+5)
#define TLCLK_REG6 (TLCLK_BASE+6)
#define TLCLK_REG7 (TLCLK_BASE+7)
#define SET_PORT_BITS(port, mask, val) outb(((inb(port) & mask) | val), port)
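/*
 * Example (illustration only): SET_PORT_BITS(TLCLK_REG1, 0xfe, 1) reads
 * TLCLK_REG1, keeps bits 7..1 (mask 0xfe clears bit 0), ORs in the new
 * bit 0 value and writes the result back - a read-modify-write of the
 * I/O port.
 */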
/* 0 = Dynamic allocation of the major device number */
#define TLCLK_MAJOR 0
/* sysfs interface definition:
Upon loading, the driver will create a sysfs directory under
/sys/devices/platform/telco_clock.
This directory exports the following interfaces. Their operation is
documented in the MPCBL0010 TPS under the Telecom Clock API section, 11.4.
alarms :
current_ref :
received_ref_clk3a :
received_ref_clk3b :
enable_clk3a_output :
enable_clk3b_output :
enable_clka0_output :
enable_clka1_output :
enable_clkb0_output :
enable_clkb1_output :
filter_select :
hardware_switching :
hardware_switching_mode :
telclock_version :
mode_select :
refalign :
reset :
select_amcb1_transmit_clock :
select_amcb2_transmit_clock :
select_redundant_clock :
select_ref_frequency :
All sysfs interfaces are integers in hex format, i.e. echo 99 > refalign
has the same effect as echo 0x99 > refalign.
*/
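/*
 * Usage illustration (hypothetical shell session, paths as documented
 * above):
 *	echo 0x99 > /sys/devices/platform/telco_clock/refalign
 *	cat /sys/devices/platform/telco_clock/alarms
 */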
static unsigned int telclk_interrupt;
static int int_events; /* events that generated an interrupt */
static int got_event; /* set when event processing has been done */
static void switchover_timeout(struct timer_list *t);
static struct timer_list switchover_timer;
static unsigned long tlclk_timer_data;
static struct tlclk_alarms *alarm_events;
static DEFINE_SPINLOCK(event_lock);
static int tlclk_major = TLCLK_MAJOR;
static irqreturn_t tlclk_interrupt(int irq, void *dev_id);
static DECLARE_WAIT_QUEUE_HEAD(wq);
static unsigned long useflags;
static DEFINE_MUTEX(tlclk_mutex);
static int tlclk_open(struct inode *inode, struct file *filp)
{
int result;
mutex_lock(&tlclk_mutex);
if (test_and_set_bit(0, &useflags)) {
result = -EBUSY;
/* this legacy device is always one per system and it doesn't
* know how to handle multiple concurrent clients.
*/
goto out;
}
/* Make sure there is no interrupt pending while
* initialising interrupt handler */
inb(TLCLK_REG6);
/* This device is wired through the FPGA I/O space of the ATCA blade;
* we can't share this IRQ */
result = request_irq(telclk_interrupt, &tlclk_interrupt,
0, "telco_clock", tlclk_interrupt);
if (result == -EBUSY)
printk(KERN_ERR "tlclk: Interrupt can't be reserved.\n");
else
inb(TLCLK_REG6); /* Clear interrupt events */
out:
mutex_unlock(&tlclk_mutex);
return result;
}
static int tlclk_release(struct inode *inode, struct file *filp)
{
free_irq(telclk_interrupt, tlclk_interrupt);
clear_bit(0, &useflags);
return 0;
}
static ssize_t tlclk_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
if (count < sizeof(struct tlclk_alarms))
return -EIO;
if (mutex_lock_interruptible(&tlclk_mutex))
return -EINTR;
wait_event_interruptible(wq, got_event);
if (copy_to_user(buf, alarm_events, sizeof(struct tlclk_alarms))) {
mutex_unlock(&tlclk_mutex);
return -EFAULT;
}
memset(alarm_events, 0, sizeof(struct tlclk_alarms));
got_event = 0;
mutex_unlock(&tlclk_mutex);
return sizeof(struct tlclk_alarms);
}
static const struct file_operations tlclk_fops = {
.read = tlclk_read,
.open = tlclk_open,
.release = tlclk_release,
.llseek = noop_llseek,
};
static struct miscdevice tlclk_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "telco_clock",
.fops = &tlclk_fops,
};
static ssize_t show_current_ref(struct device *d,
struct device_attribute *attr, char *buf)
{
unsigned long ret_val;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
ret_val = ((inb(TLCLK_REG1) & 0x08) >> 3);
spin_unlock_irqrestore(&event_lock, flags);
return sprintf(buf, "0x%lX\n", ret_val);
}
static DEVICE_ATTR(current_ref, S_IRUGO, show_current_ref, NULL);
static ssize_t show_telclock_version(struct device *d,
struct device_attribute *attr, char *buf)
{
unsigned long ret_val;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
ret_val = inb(TLCLK_REG5);
spin_unlock_irqrestore(&event_lock, flags);
return sprintf(buf, "0x%lX\n", ret_val);
}
static DEVICE_ATTR(telclock_version, S_IRUGO,
show_telclock_version, NULL);
static ssize_t show_alarms(struct device *d,
struct device_attribute *attr, char *buf)
{
unsigned long ret_val;
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
ret_val = (inb(TLCLK_REG2) & 0xf0);
spin_unlock_irqrestore(&event_lock, flags);
return sprintf(buf, "0x%lX\n", ret_val);
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static ssize_t store_received_ref_clk3a(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, ": tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG1, 0xef, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(received_ref_clk3a, (S_IWUSR|S_IWGRP), NULL,
store_received_ref_clk3a);
static ssize_t store_received_ref_clk3b(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, ": tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG1, 0xdf, val << 1);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(received_ref_clk3b, (S_IWUSR|S_IWGRP), NULL,
store_received_ref_clk3b);
static ssize_t store_enable_clk3b_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, ": tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG3, 0x7f, val << 7);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clk3b_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clk3b_output);
static ssize_t store_enable_clk3a_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG3, 0xbf, val << 6);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clk3a_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clk3a_output);
static ssize_t store_enable_clkb1_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG2, 0xf7, val << 3);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clkb1_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clkb1_output);
static ssize_t store_enable_clka1_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG2, 0xfb, val << 2);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clka1_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clka1_output);
static ssize_t store_enable_clkb0_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG2, 0xfd, val << 1);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clkb0_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clkb0_output);
static ssize_t store_enable_clka0_output(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG2, 0xfe, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(enable_clka0_output, (S_IWUSR|S_IWGRP), NULL,
store_enable_clka0_output);
static ssize_t store_select_amcb2_transmit_clock(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long flags;
unsigned long tmp;
unsigned char val;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x28);
SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
} else if (val >= CLK_8_592MHz) {
SET_PORT_BITS(TLCLK_REG3, 0xc7, 0x38);
switch (val) {
case CLK_8_592MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
break;
case CLK_11_184MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
break;
case CLK_34_368MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
break;
case CLK_44_736MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
break;
}
} else {
SET_PORT_BITS(TLCLK_REG3, 0xc7, val << 3);
}
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(select_amcb2_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
store_select_amcb2_transmit_clock);
static ssize_t store_select_amcb1_transmit_clock(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
if ((val == CLK_8kHz) || (val == CLK_16_384MHz)) {
SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x5);
SET_PORT_BITS(TLCLK_REG1, 0xfb, ~val);
} else if (val >= CLK_8_592MHz) {
SET_PORT_BITS(TLCLK_REG3, 0xf8, 0x7);
switch (val) {
case CLK_8_592MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 2);
break;
case CLK_11_184MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 0);
break;
case CLK_34_368MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 3);
break;
case CLK_44_736MHz:
SET_PORT_BITS(TLCLK_REG0, 0xfc, 1);
break;
}
} else {
SET_PORT_BITS(TLCLK_REG3, 0xf8, val);
}
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(select_amcb1_transmit_clock, (S_IWUSR|S_IWGRP), NULL,
store_select_amcb1_transmit_clock);
static ssize_t store_select_redundant_clock(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG1, 0xfe, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(select_redundant_clock, (S_IWUSR|S_IWGRP), NULL,
store_select_redundant_clock);
static ssize_t store_select_ref_frequency(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG1, 0xfd, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(select_ref_frequency, (S_IWUSR|S_IWGRP), NULL,
store_select_ref_frequency);
static ssize_t store_filter_select(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xfb, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(filter_select, (S_IWUSR|S_IWGRP), NULL, store_filter_select);
static ssize_t store_hardware_switching_mode(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xbf, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(hardware_switching_mode, (S_IWUSR|S_IWGRP), NULL,
store_hardware_switching_mode);
static ssize_t store_hardware_switching(struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0x7f, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(hardware_switching, (S_IWUSR|S_IWGRP), NULL,
store_hardware_switching);
static ssize_t store_refalign (struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0x08);
SET_PORT_BITS(TLCLK_REG0, 0xf7, 0);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(refalign, (S_IWUSR|S_IWGRP), NULL, store_refalign);
static ssize_t store_mode_select (struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG0, 0xcf, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(mode_select, (S_IWUSR|S_IWGRP), NULL, store_mode_select);
static ssize_t store_reset (struct device *d,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long tmp;
unsigned char val;
unsigned long flags;
sscanf(buf, "%lX", &tmp);
dev_dbg(d, "tmp = 0x%lX\n", tmp);
val = (unsigned char)tmp;
spin_lock_irqsave(&event_lock, flags);
SET_PORT_BITS(TLCLK_REG4, 0xfd, val);
spin_unlock_irqrestore(&event_lock, flags);
return strnlen(buf, count);
}
static DEVICE_ATTR(reset, (S_IWUSR|S_IWGRP), NULL, store_reset);
static struct attribute *tlclk_sysfs_entries[] = {
&dev_attr_current_ref.attr,
&dev_attr_telclock_version.attr,
&dev_attr_alarms.attr,
&dev_attr_received_ref_clk3a.attr,
&dev_attr_received_ref_clk3b.attr,
&dev_attr_enable_clk3a_output.attr,
&dev_attr_enable_clk3b_output.attr,
&dev_attr_enable_clkb1_output.attr,
&dev_attr_enable_clka1_output.attr,
&dev_attr_enable_clkb0_output.attr,
&dev_attr_enable_clka0_output.attr,
&dev_attr_select_amcb1_transmit_clock.attr,
&dev_attr_select_amcb2_transmit_clock.attr,
&dev_attr_select_redundant_clock.attr,
&dev_attr_select_ref_frequency.attr,
&dev_attr_filter_select.attr,
&dev_attr_hardware_switching_mode.attr,
&dev_attr_hardware_switching.attr,
&dev_attr_refalign.attr,
&dev_attr_mode_select.attr,
&dev_attr_reset.attr,
NULL
};
static const struct attribute_group tlclk_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = tlclk_sysfs_entries,
};
static struct platform_device *tlclk_device;
static int __init tlclk_init(void)
{
int ret;
telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
if (!alarm_events) {
ret = -ENOMEM;
goto out1;
}
ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
if (ret < 0) {
printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
kfree(alarm_events);
return ret;
}
tlclk_major = ret;
/* Reserve the telecom clock I/O registers; the IRQ number (set by BIOS) was already read from TLCLK_REG7 above */
if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
printk(KERN_ERR "tlclk: request_region 0x%X failed.\n",
TLCLK_BASE);
ret = -EBUSY;
goto out2;
}
if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
telclk_interrupt);
ret = -ENXIO;
goto out3;
}
timer_setup(&switchover_timer, switchover_timeout, 0);
ret = misc_register(&tlclk_miscdev);
if (ret < 0) {
printk(KERN_ERR "tlclk: misc_register returns %d.\n", ret);
goto out3;
}
tlclk_device = platform_device_register_simple("telco_clock",
-1, NULL, 0);
if (IS_ERR(tlclk_device)) {
printk(KERN_ERR "tlclk: platform_device_register failed.\n");
ret = PTR_ERR(tlclk_device);
goto out4;
}
ret = sysfs_create_group(&tlclk_device->dev.kobj,
&tlclk_attribute_group);
if (ret) {
printk(KERN_ERR "tlclk: failed to create sysfs device attributes.\n");
goto out5;
}
return 0;
out5:
platform_device_unregister(tlclk_device);
out4:
misc_deregister(&tlclk_miscdev);
out3:
release_region(TLCLK_BASE, 8);
out2:
kfree(alarm_events);
unregister_chrdev(tlclk_major, "telco_clock");
out1:
return ret;
}
static void __exit tlclk_cleanup(void)
{
sysfs_remove_group(&tlclk_device->dev.kobj, &tlclk_attribute_group);
platform_device_unregister(tlclk_device);
misc_deregister(&tlclk_miscdev);
unregister_chrdev(tlclk_major, "telco_clock");
release_region(TLCLK_BASE, 8);
del_timer_sync(&switchover_timer);
kfree(alarm_events);
}
static void switchover_timeout(struct timer_list *unused)
{
unsigned long flags = tlclk_timer_data;
if ((flags & 1)) {
if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
alarm_events->switchover_primary++;
} else {
if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08))
alarm_events->switchover_secondary++;
}
/* Alarm processing is done, wake up read task */
del_timer(&switchover_timer);
got_event = 1;
wake_up(&wq);
}
static irqreturn_t tlclk_interrupt(int irq, void *dev_id)
{
unsigned long flags;
spin_lock_irqsave(&event_lock, flags);
/* Read and clear interrupt events */
int_events = inb(TLCLK_REG6);
/* Primary_Los changed from 0 to 1 ? */
if (int_events & PRI_LOS_01_MASK) {
if (inb(TLCLK_REG2) & SEC_LOST_MASK)
alarm_events->lost_clocks++;
else
alarm_events->lost_primary_clock++;
}
/* Primary_Los changed from 1 to 0 ? */
if (int_events & PRI_LOS_10_MASK) {
alarm_events->primary_clock_back++;
SET_PORT_BITS(TLCLK_REG1, 0xFE, 1);
}
/* Secondary_Los changed from 0 to 1 ? */
if (int_events & SEC_LOS_01_MASK) {
if (inb(TLCLK_REG2) & PRI_LOST_MASK)
alarm_events->lost_clocks++;
else
alarm_events->lost_secondary_clock++;
}
/* Secondary_Los changed from 1 to 0 ? */
if (int_events & SEC_LOS_10_MASK) {
alarm_events->secondary_clock_back++;
SET_PORT_BITS(TLCLK_REG1, 0xFE, 0);
}
if (int_events & HOLDOVER_10_MASK)
alarm_events->pll_end_holdover++;
if (int_events & UNLOCK_01_MASK)
alarm_events->pll_lost_sync++;
if (int_events & UNLOCK_10_MASK)
alarm_events->pll_sync++;
/* Holdover changed from 0 to 1 ? */
if (int_events & HOLDOVER_01_MASK) {
alarm_events->pll_holdover++;
/* TIMEOUT in ~10ms */
switchover_timer.expires = jiffies + msecs_to_jiffies(10);
tlclk_timer_data = inb(TLCLK_REG1);
mod_timer(&switchover_timer, switchover_timer.expires);
} else {
got_event = 1;
wake_up(&wq);
}
spin_unlock_irqrestore(&event_lock, flags);
return IRQ_HANDLED;
}
module_init(tlclk_init);
module_exit(tlclk_cleanup);
| linux-master | drivers/char/tlclk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sony Programmable I/O Control Device driver for VAIO
*
* Copyright (C) 2007 Mattia Dongili <[email protected]>
*
* Copyright (C) 2001-2005 Stelian Pop <[email protected]>
*
* Copyright (C) 2005 Narayanan R S <[email protected]>
*
* Copyright (C) 2001-2002 Alcôve <www.alcove.com>
*
* Copyright (C) 2001 Michael Ashley <[email protected]>
*
* Copyright (C) 2001 Junichi Morita <[email protected]>
*
* Copyright (C) 2000 Takaya Kinjo <[email protected]>
*
* Copyright (C) 2000 Andrew Tridgell <[email protected]>
*
* Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/sonypi.h>
#define SONYPI_DRIVER_VERSION "1.26"
MODULE_AUTHOR("Stelian Pop <[email protected]>");
MODULE_DESCRIPTION("Sony Programmable I/O Control Device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SONYPI_DRIVER_VERSION);
static int minor = -1;
module_param(minor, int, 0);
MODULE_PARM_DESC(minor,
"minor number of the misc device, default is -1 (automatic)");
static int verbose; /* = 0 */
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "be verbose, default is 0 (no)");
static int fnkeyinit; /* = 0 */
module_param(fnkeyinit, int, 0444);
MODULE_PARM_DESC(fnkeyinit,
"set this if your Fn keys do not generate any event");
static int camera; /* = 0 */
module_param(camera, int, 0444);
MODULE_PARM_DESC(camera,
"set this if you have a MotionEye camera (PictureBook series)");
static int compat; /* = 0 */
module_param(compat, int, 0444);
MODULE_PARM_DESC(compat,
"set this if you want to enable backward compatibility mode");
static unsigned long mask = 0xffffffff;
module_param(mask, ulong, 0644);
MODULE_PARM_DESC(mask,
"set this to the mask of event you want to enable (see doc)");
static int useinput = 1;
module_param(useinput, int, 0444);
MODULE_PARM_DESC(useinput,
"set this if you would like sonypi to feed events to the input subsystem");
static int check_ioport = 1;
module_param(check_ioport, int, 0444);
MODULE_PARM_DESC(check_ioport,
"set this to 0 if you think the automatic ioport check for sony-laptop is wrong");
#define SONYPI_DEVICE_MODEL_TYPE1 1
#define SONYPI_DEVICE_MODEL_TYPE2 2
#define SONYPI_DEVICE_MODEL_TYPE3 3
/* type1 models use those */
#define SONYPI_IRQ_PORT 0x8034
#define SONYPI_IRQ_SHIFT 22
#define SONYPI_TYPE1_BASE 0x50
#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14)
#define SONYPI_TYPE1_REGION_SIZE 0x08
#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04
/* type2 series specifics */
#define SONYPI_SIRQ 0x9b
#define SONYPI_SLOB 0x9c
#define SONYPI_SHIB 0x9d
#define SONYPI_TYPE2_REGION_SIZE 0x20
#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12
/* type3 series specifics */
#define SONYPI_TYPE3_BASE 0x40
#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */
#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */
#define SONYPI_TYPE3_REGION_SIZE 0x20
#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12
/* battery / brightness addresses */
#define SONYPI_BAT_FLAGS 0x81
#define SONYPI_LCD_LIGHT 0x96
#define SONYPI_BAT1_PCTRM 0xa0
#define SONYPI_BAT1_LEFT 0xa2
#define SONYPI_BAT1_MAXRT 0xa4
#define SONYPI_BAT2_PCTRM 0xa8
#define SONYPI_BAT2_LEFT 0xaa
#define SONYPI_BAT2_MAXRT 0xac
#define SONYPI_BAT1_MAXTK 0xb0
#define SONYPI_BAT1_FULL 0xb2
#define SONYPI_BAT2_MAXTK 0xb8
#define SONYPI_BAT2_FULL 0xba
/* FAN0 information (reverse engineered from ACPI tables) */
#define SONYPI_FAN0_STATUS 0x93
#define SONYPI_TEMP_STATUS 0xC1
/* ioports used for brightness and type2 events */
#define SONYPI_DATA_IOPORT 0x62
#define SONYPI_CST_IOPORT 0x66
/* The set of possible ioports */
struct sonypi_ioport_list {
u16 port1;
u16 port2;
};
static struct sonypi_ioport_list sonypi_type1_ioport_list[] = {
{ 0x10c0, 0x10c4 }, /* looks like the default on C1Vx */
{ 0x1080, 0x1084 },
{ 0x1090, 0x1094 },
{ 0x10a0, 0x10a4 },
{ 0x10b0, 0x10b4 },
{ 0x0, 0x0 }
};
static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
{ 0x1080, 0x1084 },
{ 0x10a0, 0x10a4 },
{ 0x10c0, 0x10c4 },
{ 0x10e0, 0x10e4 },
{ 0x0, 0x0 }
};
/* same as in type 2 models */
static struct sonypi_ioport_list *sonypi_type3_ioport_list =
sonypi_type2_ioport_list;
/* The set of possible interrupts */
struct sonypi_irq_list {
u16 irq;
u16 bits;
};
static struct sonypi_irq_list sonypi_type1_irq_list[] = {
{ 11, 0x2 }, /* IRQ 11, GO22=0,GO23=1 in AML */
{ 10, 0x1 }, /* IRQ 10, GO22=1,GO23=0 in AML */
{ 5, 0x0 }, /* IRQ 5, GO22=0,GO23=0 in AML */
{ 0, 0x3 } /* no IRQ, GO22=1,GO23=1 in AML */
};
static struct sonypi_irq_list sonypi_type2_irq_list[] = {
{ 11, 0x80 }, /* IRQ 11, 0x80 in SIRQ in AML */
{ 10, 0x40 }, /* IRQ 10, 0x40 in SIRQ in AML */
{ 9, 0x20 }, /* IRQ 9, 0x20 in SIRQ in AML */
{ 6, 0x10 }, /* IRQ 6, 0x10 in SIRQ in AML */
{ 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */
};
/* same as in type2 models */
static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
#define SONYPI_CAMERA_BRIGHTNESS 0
#define SONYPI_CAMERA_CONTRAST 1
#define SONYPI_CAMERA_HUE 2
#define SONYPI_CAMERA_COLOR 3
#define SONYPI_CAMERA_SHARPNESS 4
#define SONYPI_CAMERA_PICTURE 5
#define SONYPI_CAMERA_EXPOSURE_MASK 0xC
#define SONYPI_CAMERA_WHITE_BALANCE_MASK 0x3
#define SONYPI_CAMERA_PICTURE_MODE_MASK 0x30
#define SONYPI_CAMERA_MUTE_MASK 0x40
/* the remaining registers don't need a read loop until the value is not 0xff */
#define SONYPI_CAMERA_AGC 6
#define SONYPI_CAMERA_AGC_MASK 0x30
#define SONYPI_CAMERA_SHUTTER_MASK 0x7
#define SONYPI_CAMERA_SHUTDOWN_REQUEST 7
#define SONYPI_CAMERA_CONTROL 0x10
#define SONYPI_CAMERA_STATUS 7
#define SONYPI_CAMERA_STATUS_READY 0x2
#define SONYPI_CAMERA_STATUS_POSITION 0x4
#define SONYPI_DIRECTION_BACKWARDS 0x4
#define SONYPI_CAMERA_REVISION 8
#define SONYPI_CAMERA_ROMVERSION 9
/* Event masks */
#define SONYPI_JOGGER_MASK 0x00000001
#define SONYPI_CAPTURE_MASK 0x00000002
#define SONYPI_FNKEY_MASK 0x00000004
#define SONYPI_BLUETOOTH_MASK 0x00000008
#define SONYPI_PKEY_MASK 0x00000010
#define SONYPI_BACK_MASK 0x00000020
#define SONYPI_HELP_MASK 0x00000040
#define SONYPI_LID_MASK 0x00000080
#define SONYPI_ZOOM_MASK 0x00000100
#define SONYPI_THUMBPHRASE_MASK 0x00000200
#define SONYPI_MEYE_MASK 0x00000400
#define SONYPI_MEMORYSTICK_MASK 0x00000800
#define SONYPI_BATTERY_MASK 0x00001000
#define SONYPI_WIRELESS_MASK 0x00002000
struct sonypi_event {
u8 data;
u8 event;
};
/* The set of possible button release events */
static struct sonypi_event sonypi_releaseev[] = {
{ 0x00, SONYPI_EVENT_ANYBUTTON_RELEASED },
{ 0, 0 }
};
/* The set of possible jogger events */
static struct sonypi_event sonypi_joggerev[] = {
{ 0x1f, SONYPI_EVENT_JOGDIAL_UP },
{ 0x01, SONYPI_EVENT_JOGDIAL_DOWN },
{ 0x5f, SONYPI_EVENT_JOGDIAL_UP_PRESSED },
{ 0x41, SONYPI_EVENT_JOGDIAL_DOWN_PRESSED },
{ 0x1e, SONYPI_EVENT_JOGDIAL_FAST_UP },
{ 0x02, SONYPI_EVENT_JOGDIAL_FAST_DOWN },
{ 0x5e, SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED },
{ 0x42, SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED },
{ 0x1d, SONYPI_EVENT_JOGDIAL_VFAST_UP },
{ 0x03, SONYPI_EVENT_JOGDIAL_VFAST_DOWN },
{ 0x5d, SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED },
{ 0x43, SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED },
{ 0x40, SONYPI_EVENT_JOGDIAL_PRESSED },
{ 0, 0 }
};
/* The set of possible capture button events */
static struct sonypi_event sonypi_captureev[] = {
{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
{ 0, 0 }
};
/* The set of possible fnkeys events */
static struct sonypi_event sonypi_fnkeyev[] = {
{ 0x10, SONYPI_EVENT_FNKEY_ESC },
{ 0x11, SONYPI_EVENT_FNKEY_F1 },
{ 0x12, SONYPI_EVENT_FNKEY_F2 },
{ 0x13, SONYPI_EVENT_FNKEY_F3 },
{ 0x14, SONYPI_EVENT_FNKEY_F4 },
{ 0x15, SONYPI_EVENT_FNKEY_F5 },
{ 0x16, SONYPI_EVENT_FNKEY_F6 },
{ 0x17, SONYPI_EVENT_FNKEY_F7 },
{ 0x18, SONYPI_EVENT_FNKEY_F8 },
{ 0x19, SONYPI_EVENT_FNKEY_F9 },
{ 0x1a, SONYPI_EVENT_FNKEY_F10 },
{ 0x1b, SONYPI_EVENT_FNKEY_F11 },
{ 0x1c, SONYPI_EVENT_FNKEY_F12 },
{ 0x1f, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x21, SONYPI_EVENT_FNKEY_1 },
{ 0x22, SONYPI_EVENT_FNKEY_2 },
{ 0x31, SONYPI_EVENT_FNKEY_D },
{ 0x32, SONYPI_EVENT_FNKEY_E },
{ 0x33, SONYPI_EVENT_FNKEY_F },
{ 0x34, SONYPI_EVENT_FNKEY_S },
{ 0x35, SONYPI_EVENT_FNKEY_B },
{ 0x36, SONYPI_EVENT_FNKEY_ONLY },
{ 0, 0 }
};
/* The set of possible program key events */
static struct sonypi_event sonypi_pkeyev[] = {
{ 0x01, SONYPI_EVENT_PKEY_P1 },
{ 0x02, SONYPI_EVENT_PKEY_P2 },
{ 0x04, SONYPI_EVENT_PKEY_P3 },
{ 0x5c, SONYPI_EVENT_PKEY_P1 },
{ 0, 0 }
};
/* The set of possible bluetooth events */
static struct sonypi_event sonypi_blueev[] = {
{ 0x55, SONYPI_EVENT_BLUETOOTH_PRESSED },
{ 0x59, SONYPI_EVENT_BLUETOOTH_ON },
{ 0x5a, SONYPI_EVENT_BLUETOOTH_OFF },
{ 0, 0 }
};
/* The set of possible wireless events */
static struct sonypi_event sonypi_wlessev[] = {
{ 0x59, SONYPI_EVENT_WIRELESS_ON },
{ 0x5a, SONYPI_EVENT_WIRELESS_OFF },
{ 0, 0 }
};
/* The set of possible back button events */
static struct sonypi_event sonypi_backev[] = {
{ 0x20, SONYPI_EVENT_BACK_PRESSED },
{ 0, 0 }
};
/* The set of possible help button events */
static struct sonypi_event sonypi_helpev[] = {
{ 0x3b, SONYPI_EVENT_HELP_PRESSED },
{ 0, 0 }
};
/* The set of possible lid events */
static struct sonypi_event sonypi_lidev[] = {
{ 0x51, SONYPI_EVENT_LID_CLOSED },
{ 0x50, SONYPI_EVENT_LID_OPENED },
{ 0, 0 }
};
/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
{ 0, 0 }
};
/* The set of possible thumbphrase events */
static struct sonypi_event sonypi_thumbphraseev[] = {
{ 0x3a, SONYPI_EVENT_THUMBPHRASE_PRESSED },
{ 0, 0 }
};
/* The set of possible motioneye camera events */
static struct sonypi_event sonypi_meyeev[] = {
{ 0x00, SONYPI_EVENT_MEYE_FACE },
{ 0x01, SONYPI_EVENT_MEYE_OPPOSITE },
{ 0, 0 }
};
/* The set of possible memorystick events */
static struct sonypi_event sonypi_memorystickev[] = {
{ 0x53, SONYPI_EVENT_MEMORYSTICK_INSERT },
{ 0x54, SONYPI_EVENT_MEMORYSTICK_EJECT },
{ 0, 0 }
};
/* The set of possible battery events */
static struct sonypi_event sonypi_batteryev[] = {
{ 0x20, SONYPI_EVENT_BATTERY_INSERT },
{ 0x30, SONYPI_EVENT_BATTERY_REMOVE },
{ 0, 0 }
};
static struct sonypi_eventtypes {
int model;
u8 data;
unsigned long mask;
struct sonypi_event * events;
} sonypi_eventtypes[] = {
{ SONYPI_DEVICE_MODEL_TYPE1, 0, 0xffffffff, sonypi_releaseev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
{ SONYPI_DEVICE_MODEL_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0, 0xffffffff, sonypi_releaseev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
{ SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
{ 0 }
};
#define SONYPI_BUF_SIZE 128
/* Correspondence table between sonypi events and input layer events */
static struct {
int sonypiev;
int inputev;
} sonypi_inputkeys[] = {
{ SONYPI_EVENT_CAPTURE_PRESSED, KEY_CAMERA },
{ SONYPI_EVENT_FNKEY_ONLY, KEY_FN },
{ SONYPI_EVENT_FNKEY_ESC, KEY_FN_ESC },
{ SONYPI_EVENT_FNKEY_F1, KEY_FN_F1 },
{ SONYPI_EVENT_FNKEY_F2, KEY_FN_F2 },
{ SONYPI_EVENT_FNKEY_F3, KEY_FN_F3 },
{ SONYPI_EVENT_FNKEY_F4, KEY_FN_F4 },
{ SONYPI_EVENT_FNKEY_F5, KEY_FN_F5 },
{ SONYPI_EVENT_FNKEY_F6, KEY_FN_F6 },
{ SONYPI_EVENT_FNKEY_F7, KEY_FN_F7 },
{ SONYPI_EVENT_FNKEY_F8, KEY_FN_F8 },
{ SONYPI_EVENT_FNKEY_F9, KEY_FN_F9 },
{ SONYPI_EVENT_FNKEY_F10, KEY_FN_F10 },
{ SONYPI_EVENT_FNKEY_F11, KEY_FN_F11 },
{ SONYPI_EVENT_FNKEY_F12, KEY_FN_F12 },
{ SONYPI_EVENT_FNKEY_1, KEY_FN_1 },
{ SONYPI_EVENT_FNKEY_2, KEY_FN_2 },
{ SONYPI_EVENT_FNKEY_D, KEY_FN_D },
{ SONYPI_EVENT_FNKEY_E, KEY_FN_E },
{ SONYPI_EVENT_FNKEY_F, KEY_FN_F },
{ SONYPI_EVENT_FNKEY_S, KEY_FN_S },
{ SONYPI_EVENT_FNKEY_B, KEY_FN_B },
{ SONYPI_EVENT_BLUETOOTH_PRESSED, KEY_BLUE },
{ SONYPI_EVENT_BLUETOOTH_ON, KEY_BLUE },
{ SONYPI_EVENT_PKEY_P1, KEY_PROG1 },
{ SONYPI_EVENT_PKEY_P2, KEY_PROG2 },
{ SONYPI_EVENT_PKEY_P3, KEY_PROG3 },
{ SONYPI_EVENT_BACK_PRESSED, KEY_BACK },
{ SONYPI_EVENT_HELP_PRESSED, KEY_HELP },
{ SONYPI_EVENT_ZOOM_PRESSED, KEY_ZOOM },
{ SONYPI_EVENT_THUMBPHRASE_PRESSED, BTN_THUMB },
{ 0, 0 },
};
struct sonypi_keypress {
struct input_dev *dev;
int key;
};
static struct sonypi_device {
struct pci_dev *dev;
u16 irq;
u16 bits;
u16 ioport1;
u16 ioport2;
u16 region_size;
u16 evtype_offset;
int camera_power;
int bluetooth_power;
struct mutex lock;
struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
struct fasync_struct *fifo_async;
int open_count;
int model;
struct input_dev *input_jog_dev;
struct input_dev *input_key_dev;
struct work_struct input_work;
struct kfifo input_fifo;
spinlock_t input_fifo_lock;
} sonypi_device;
#define ITERATIONS_LONG 10000
#define ITERATIONS_SHORT 10
#define wait_on_command(quiet, command, iterations) { \
unsigned int n = iterations; \
while (--n && (command)) \
udelay(1); \
if (!n && (verbose || !quiet)) \
printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
}
#ifdef CONFIG_ACPI
#define SONYPI_ACPI_ACTIVE (!acpi_disabled)
#else
#define SONYPI_ACPI_ACTIVE 0
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI
static struct acpi_device *sonypi_acpi_device;
static int acpi_driver_registered;
#endif
static int sonypi_ec_write(u8 addr, u8 value)
{
#ifdef CONFIG_ACPI
if (SONYPI_ACPI_ACTIVE)
return ec_write(addr, value);
#endif
wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
outb_p(0x81, SONYPI_CST_IOPORT);
wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
outb_p(addr, SONYPI_DATA_IOPORT);
wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
outb_p(value, SONYPI_DATA_IOPORT);
wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
return 0;
}
static int sonypi_ec_read(u8 addr, u8 *value)
{
#ifdef CONFIG_ACPI
if (SONYPI_ACPI_ACTIVE)
return ec_read(addr, value);
#endif
wait_on_command(1, inb_p(SONYPI_CST_IOPORT) & 3, ITERATIONS_LONG);
outb_p(0x80, SONYPI_CST_IOPORT);
wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
outb_p(addr, SONYPI_DATA_IOPORT);
wait_on_command(0, inb_p(SONYPI_CST_IOPORT) & 2, ITERATIONS_LONG);
*value = inb_p(SONYPI_DATA_IOPORT);
return 0;
}
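/*
* Reads two consecutive EC registers and combines them into a 16-bit
* value: addr supplies the low byte, addr + 1 the high byte.
*/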
static int ec_read16(u8 addr, u16 *value)
{
u8 val_lb, val_hb;
if (sonypi_ec_read(addr, &val_lb))
return -1;
if (sonypi_ec_read(addr + 1, &val_hb))
return -1;
*value = val_lb | (val_hb << 8);
return 0;
}
/* Initializes the device - this comes from the AML code in the ACPI bios */
static void sonypi_type1_srs(void)
{
u32 v;
pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
v = (v & 0xFFFF0000) | ((u32) sonypi_device.ioport1);
pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
v = (v & 0xFFF0FFFF) |
(((u32) sonypi_device.ioport1 ^ sonypi_device.ioport2) << 16);
pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
v = inl(SONYPI_IRQ_PORT);
v &= ~(((u32) 0x3) << SONYPI_IRQ_SHIFT);
v |= (((u32) sonypi_device.bits) << SONYPI_IRQ_SHIFT);
outl(v, SONYPI_IRQ_PORT);
pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
v = (v & 0xFF1FFFFF) | 0x00C00000;
pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
}
static void sonypi_type2_srs(void)
{
if (sonypi_ec_write(SONYPI_SHIB, (sonypi_device.ioport1 & 0xFF00) >> 8))
printk(KERN_WARNING "ec_write failed\n");
if (sonypi_ec_write(SONYPI_SLOB, sonypi_device.ioport1 & 0x00FF))
printk(KERN_WARNING "ec_write failed\n");
if (sonypi_ec_write(SONYPI_SIRQ, sonypi_device.bits))
printk(KERN_WARNING "ec_write failed\n");
udelay(10);
}
static void sonypi_type3_srs(void)
{
u16 v16;
u8 v8;
/* This model type uses the same initialization of
* the embedded controller as the type2 models. */
sonypi_type2_srs();
/* Initialization of PCI config space of the LPC interface bridge. */
v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
v8 = (v8 & 0xCF) | 0x10;
pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
}
/* Disables the device - this comes from the AML code in the ACPI bios */
static void sonypi_type1_dis(void)
{
u32 v;
pci_read_config_dword(sonypi_device.dev, SONYPI_G10A, &v);
v = v & 0xFF3FFFFF;
pci_write_config_dword(sonypi_device.dev, SONYPI_G10A, v);
v = inl(SONYPI_IRQ_PORT);
v |= (0x3 << SONYPI_IRQ_SHIFT);
outl(v, SONYPI_IRQ_PORT);
}
static void sonypi_type2_dis(void)
{
if (sonypi_ec_write(SONYPI_SHIB, 0))
printk(KERN_WARNING "ec_write failed\n");
if (sonypi_ec_write(SONYPI_SLOB, 0))
printk(KERN_WARNING "ec_write failed\n");
if (sonypi_ec_write(SONYPI_SIRQ, 0))
printk(KERN_WARNING "ec_write failed\n");
}
static void sonypi_type3_dis(void)
{
sonypi_type2_dis();
udelay(10);
pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
}
static u8 sonypi_call1(u8 dev)
{
u8 v1, v2;
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(dev, sonypi_device.ioport2);
v1 = inb_p(sonypi_device.ioport2);
v2 = inb_p(sonypi_device.ioport1);
return v2;
}
static u8 sonypi_call2(u8 dev, u8 fn)
{
u8 v1;
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(dev, sonypi_device.ioport2);
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(fn, sonypi_device.ioport1);
v1 = inb_p(sonypi_device.ioport1);
return v1;
}
static u8 sonypi_call3(u8 dev, u8 fn, u8 v)
{
u8 v1;
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(dev, sonypi_device.ioport2);
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(fn, sonypi_device.ioport1);
wait_on_command(0, inb_p(sonypi_device.ioport2) & 2, ITERATIONS_LONG);
outb(v, sonypi_device.ioport1);
v1 = inb_p(sonypi_device.ioport1);
return v1;
}
#if 0
/* Get brightness, hue etc. Unreliable... */
static u8 sonypi_read(u8 fn)
{
u8 v1, v2;
int n = 100;
while (n--) {
v1 = sonypi_call2(0x8f, fn);
v2 = sonypi_call2(0x8f, fn);
if (v1 == v2 && v1 != 0xff)
return v1;
}
return 0xff;
}
#endif
/* Set brightness, hue etc */
static void sonypi_set(u8 fn, u8 v)
{
wait_on_command(0, sonypi_call3(0x90, fn, v), ITERATIONS_SHORT);
}
/* Tests if the camera is ready */
static int sonypi_camera_ready(void)
{
u8 v;
v = sonypi_call2(0x8f, SONYPI_CAMERA_STATUS);
return (v != 0xff && (v & SONYPI_CAMERA_STATUS_READY));
}
/* Turns the camera off */
static void sonypi_camera_off(void)
{
sonypi_set(SONYPI_CAMERA_PICTURE, SONYPI_CAMERA_MUTE_MASK);
if (!sonypi_device.camera_power)
return;
sonypi_call2(0x91, 0);
sonypi_device.camera_power = 0;
}
/* Turns the camera on */
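/*
* Power-on is retried up to five times; each attempt waits up to
* 400 * 10 ms = 4 seconds for the camera to report ready.
*/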
static void sonypi_camera_on(void)
{
int i, j;
if (sonypi_device.camera_power)
return;
for (j = 5; j > 0; j--) {
while (sonypi_call2(0x91, 0x1))
msleep(10);
sonypi_call1(0x93);
for (i = 400; i > 0; i--) {
if (sonypi_camera_ready())
break;
msleep(10);
}
if (i)
break;
}
if (j == 0) {
printk(KERN_WARNING "sonypi: failed to power on camera\n");
return;
}
sonypi_set(0x10, 0x5a);
sonypi_device.camera_power = 1;
}
/* sets the bluetooth subsystem power state */
static void sonypi_setbluetoothpower(u8 state)
{
state = !!state;
if (sonypi_device.bluetooth_power == state)
return;
sonypi_call2(0x96, state);
sonypi_call1(0x82);
sonypi_device.bluetooth_power = state;
}
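/*
* Work item paired with sonypi_report_input_event(): key presses are
* reported from the interrupt path, while the matching releases are
* queued in input_fifo and emitted here roughly 10 ms later.
*/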
static void input_keyrelease(struct work_struct *work)
{
struct sonypi_keypress kp;
while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp,
sizeof(kp), &sonypi_device.input_fifo_lock)
== sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
}
}
static void sonypi_report_input_event(u8 event)
{
struct input_dev *jog_dev = sonypi_device.input_jog_dev;
struct input_dev *key_dev = sonypi_device.input_key_dev;
struct sonypi_keypress kp = { NULL };
int i;
switch (event) {
case SONYPI_EVENT_JOGDIAL_UP:
case SONYPI_EVENT_JOGDIAL_UP_PRESSED:
input_report_rel(jog_dev, REL_WHEEL, 1);
input_sync(jog_dev);
break;
case SONYPI_EVENT_JOGDIAL_DOWN:
case SONYPI_EVENT_JOGDIAL_DOWN_PRESSED:
input_report_rel(jog_dev, REL_WHEEL, -1);
input_sync(jog_dev);
break;
case SONYPI_EVENT_JOGDIAL_PRESSED:
kp.key = BTN_MIDDLE;
kp.dev = jog_dev;
break;
case SONYPI_EVENT_FNKEY_RELEASED:
/* Nothing, not all VAIOs generate this event */
break;
default:
for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
if (event == sonypi_inputkeys[i].sonypiev) {
kp.dev = key_dev;
kp.key = sonypi_inputkeys[i].inputev;
break;
}
break;
}
if (kp.dev) {
input_report_key(kp.dev, kp.key, 1);
input_sync(kp.dev);
kfifo_in_locked(&sonypi_device.input_fifo,
(unsigned char *)&kp, sizeof(kp),
&sonypi_device.input_fifo_lock);
schedule_work(&sonypi_device.input_work);
}
}
/* Interrupt handler: some event is available */
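/*
* Event decoding is a two-step lookup: the value read at
* ioport1 + evtype_offset selects an event class in sonypi_eventtypes[]
* (the model must match, all bits of .data must be set and the class
* must be enabled in the module-wide 'mask' setting); the value read
* at ioport1 is then matched against that class's event table.
*/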
static irqreturn_t sonypi_irq(int irq, void *dev_id)
{
u8 v1, v2, event = 0;
int i, j;
v1 = inb_p(sonypi_device.ioport1);
v2 = inb_p(sonypi_device.ioport1 + sonypi_device.evtype_offset);
for (i = 0; sonypi_eventtypes[i].model; i++) {
if (sonypi_device.model != sonypi_eventtypes[i].model)
continue;
if ((v2 & sonypi_eventtypes[i].data) !=
sonypi_eventtypes[i].data)
continue;
if (!(mask & sonypi_eventtypes[i].mask))
continue;
for (j = 0; sonypi_eventtypes[i].events[j].event; j++) {
if (v1 == sonypi_eventtypes[i].events[j].data) {
event = sonypi_eventtypes[i].events[j].event;
goto found;
}
}
}
if (verbose)
printk(KERN_WARNING
"sonypi: unknown event port1=0x%02x,port2=0x%02x\n",
v1, v2);
/* We need to return IRQ_HANDLED here because there *are*
* events belonging to the sonypi device we don't know about,
* but we still don't want those to pollute the logs... */
return IRQ_HANDLED;
found:
if (verbose > 1)
printk(KERN_INFO
"sonypi: event port1=0x%02x,port2=0x%02x\n", v1, v2);
if (useinput)
sonypi_report_input_event(event);
kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
sizeof(event), &sonypi_device.fifo_lock);
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_device.fifo_proc_list);
return IRQ_HANDLED;
}
static int sonypi_misc_fasync(int fd, struct file *filp, int on)
{
return fasync_helper(fd, filp, on, &sonypi_device.fifo_async);
}
static int sonypi_misc_release(struct inode *inode, struct file *file)
{
mutex_lock(&sonypi_device.lock);
sonypi_device.open_count--;
mutex_unlock(&sonypi_device.lock);
return 0;
}
static int sonypi_misc_open(struct inode *inode, struct file *file)
{
mutex_lock(&sonypi_device.lock);
/* Flush input queue on first open */
if (!sonypi_device.open_count)
kfifo_reset(&sonypi_device.fifo);
sonypi_device.open_count++;
mutex_unlock(&sonypi_device.lock);
return 0;
}
static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
ssize_t ret;
unsigned char c;
if ((kfifo_len(&sonypi_device.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
kfifo_len(&sonypi_device.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
(kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c),
&sonypi_device.fifo_lock) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
}
if (ret > 0) {
struct inode *inode = file_inode(file);
inode->i_atime = current_time(inode);
}
return ret;
}
static __poll_t sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
if (kfifo_len(&sonypi_device.fifo))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static long sonypi_misc_ioctl(struct file *fp,
unsigned int cmd, unsigned long arg)
{
long ret = 0;
void __user *argp = (void __user *)arg;
u8 val8;
u16 val16;
mutex_lock(&sonypi_device.lock);
switch (cmd) {
case SONYPI_IOCGBRT:
if (sonypi_ec_read(SONYPI_LCD_LIGHT, &val8)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val8, sizeof(val8)))
ret = -EFAULT;
break;
case SONYPI_IOCSBRT:
if (copy_from_user(&val8, argp, sizeof(val8))) {
ret = -EFAULT;
break;
}
if (sonypi_ec_write(SONYPI_LCD_LIGHT, val8))
ret = -EIO;
break;
case SONYPI_IOCGBAT1CAP:
if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val16, sizeof(val16)))
ret = -EFAULT;
break;
case SONYPI_IOCGBAT1REM:
if (ec_read16(SONYPI_BAT1_LEFT, &val16)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val16, sizeof(val16)))
ret = -EFAULT;
break;
case SONYPI_IOCGBAT2CAP:
if (ec_read16(SONYPI_BAT2_FULL, &val16)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val16, sizeof(val16)))
ret = -EFAULT;
break;
case SONYPI_IOCGBAT2REM:
if (ec_read16(SONYPI_BAT2_LEFT, &val16)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val16, sizeof(val16)))
ret = -EFAULT;
break;
case SONYPI_IOCGBATFLAGS:
if (sonypi_ec_read(SONYPI_BAT_FLAGS, &val8)) {
ret = -EIO;
break;
}
val8 &= 0x07;
if (copy_to_user(argp, &val8, sizeof(val8)))
ret = -EFAULT;
break;
case SONYPI_IOCGBLUE:
val8 = sonypi_device.bluetooth_power;
if (copy_to_user(argp, &val8, sizeof(val8)))
ret = -EFAULT;
break;
case SONYPI_IOCSBLUE:
if (copy_from_user(&val8, argp, sizeof(val8))) {
ret = -EFAULT;
break;
}
sonypi_setbluetoothpower(val8);
break;
/* FAN Controls */
case SONYPI_IOCGFAN:
if (sonypi_ec_read(SONYPI_FAN0_STATUS, &val8)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val8, sizeof(val8)))
ret = -EFAULT;
break;
case SONYPI_IOCSFAN:
if (copy_from_user(&val8, argp, sizeof(val8))) {
ret = -EFAULT;
break;
}
if (sonypi_ec_write(SONYPI_FAN0_STATUS, val8))
ret = -EIO;
break;
/* GET Temperature (useful under APM) */
case SONYPI_IOCGTEMP:
if (sonypi_ec_read(SONYPI_TEMP_STATUS, &val8)) {
ret = -EIO;
break;
}
if (copy_to_user(argp, &val8, sizeof(val8)))
ret = -EFAULT;
break;
default:
ret = -EINVAL;
}
mutex_unlock(&sonypi_device.lock);
return ret;
}
static const struct file_operations sonypi_misc_fops = {
.owner = THIS_MODULE,
.read = sonypi_misc_read,
.poll = sonypi_misc_poll,
.open = sonypi_misc_open,
.release = sonypi_misc_release,
.fasync = sonypi_misc_fasync,
.unlocked_ioctl = sonypi_misc_ioctl,
.llseek = no_llseek,
};
static struct miscdevice sonypi_misc_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sonypi",
.fops = &sonypi_misc_fops,
};
static void sonypi_enable(unsigned int camera_on)
{
switch (sonypi_device.model) {
case SONYPI_DEVICE_MODEL_TYPE1:
sonypi_type1_srs();
break;
case SONYPI_DEVICE_MODEL_TYPE2:
sonypi_type2_srs();
break;
case SONYPI_DEVICE_MODEL_TYPE3:
sonypi_type3_srs();
break;
}
sonypi_call1(0x82);
sonypi_call2(0x81, 0xff);
sonypi_call1(compat ? 0x92 : 0x82);
/* Enable ACPI mode to get Fn key events */
if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
outb(0xf0, 0xb2);
if (camera && camera_on)
sonypi_camera_on();
}
static int sonypi_disable(void)
{
sonypi_call2(0x81, 0); /* make sure we don't get any more events */
if (camera)
sonypi_camera_off();
/* disable ACPI mode */
if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
outb(0xf1, 0xb2);
switch (sonypi_device.model) {
case SONYPI_DEVICE_MODEL_TYPE1:
sonypi_type1_dis();
break;
case SONYPI_DEVICE_MODEL_TYPE2:
sonypi_type2_dis();
break;
case SONYPI_DEVICE_MODEL_TYPE3:
sonypi_type3_dis();
break;
}
return 0;
}
#ifdef CONFIG_ACPI
static int sonypi_acpi_add(struct acpi_device *device)
{
sonypi_acpi_device = device;
strcpy(acpi_device_name(device), "Sony laptop hotkeys");
strcpy(acpi_device_class(device), "sony/hotkey");
return 0;
}
static void sonypi_acpi_remove(struct acpi_device *device)
{
sonypi_acpi_device = NULL;
}
static const struct acpi_device_id sonypi_device_ids[] = {
{"SNY6001", 0},
{"", 0},
};
static struct acpi_driver sonypi_acpi_driver = {
.name = "sonypi",
.class = "hkey",
.ids = sonypi_device_ids,
.ops = {
.add = sonypi_acpi_add,
.remove = sonypi_acpi_remove,
},
};
#endif
static int sonypi_create_input_devices(struct platform_device *pdev)
{
struct input_dev *jog_dev;
struct input_dev *key_dev;
int i;
int error;
sonypi_device.input_jog_dev = jog_dev = input_allocate_device();
if (!jog_dev)
return -ENOMEM;
jog_dev->name = "Sony Vaio Jogdial";
jog_dev->id.bustype = BUS_ISA;
jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
jog_dev->dev.parent = &pdev->dev;
jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
sonypi_device.input_key_dev = key_dev = input_allocate_device();
if (!key_dev) {
error = -ENOMEM;
goto err_free_jogdev;
}
key_dev->name = "Sony Vaio Keys";
key_dev->id.bustype = BUS_ISA;
key_dev->id.vendor = PCI_VENDOR_ID_SONY;
key_dev->dev.parent = &pdev->dev;
/* Initialize the Input Drivers: special keys */
key_dev->evbit[0] = BIT_MASK(EV_KEY);
for (i = 0; sonypi_inputkeys[i].sonypiev; i++)
if (sonypi_inputkeys[i].inputev)
set_bit(sonypi_inputkeys[i].inputev, key_dev->keybit);
error = input_register_device(jog_dev);
if (error)
goto err_free_keydev;
error = input_register_device(key_dev);
if (error)
goto err_unregister_jogdev;
return 0;
err_unregister_jogdev:
input_unregister_device(jog_dev);
/* Set to NULL so we don't free it again below */
jog_dev = NULL;
err_free_keydev:
input_free_device(key_dev);
sonypi_device.input_key_dev = NULL;
err_free_jogdev:
input_free_device(jog_dev);
sonypi_device.input_jog_dev = NULL;
return error;
}
static int sonypi_setup_ioports(struct sonypi_device *dev,
const struct sonypi_ioport_list *ioport_list)
{
/* try to detect if sony-laptop is being used and thus
* has already requested one of the known ioports.
* As with the deprecated check_region this is racy, as we have
* multiple ioports available and one of them can be requested
* between this check and the subsequent request. Anyway, as an
* attempt to be somewhat more user-friendly than we currently are,
* this is enough.
*/
const struct sonypi_ioport_list *check = ioport_list;
while (check_ioport && check->port1) {
if (!request_region(check->port1,
sonypi_device.region_size,
"Sony Programmable I/O Device Check")) {
printk(KERN_ERR "sonypi: ioport 0x%.4x busy, using sony-laptop? "
"if not use check_ioport=0\n",
check->port1);
return -EBUSY;
}
release_region(check->port1, sonypi_device.region_size);
check++;
}
while (ioport_list->port1) {
if (request_region(ioport_list->port1,
sonypi_device.region_size,
"Sony Programmable I/O Device")) {
dev->ioport1 = ioport_list->port1;
dev->ioport2 = ioport_list->port2;
return 0;
}
ioport_list++;
}
return -EBUSY;
}
static int sonypi_setup_irq(struct sonypi_device *dev,
const struct sonypi_irq_list *irq_list)
{
while (irq_list->irq) {
if (!request_irq(irq_list->irq, sonypi_irq,
IRQF_SHARED, "sonypi", sonypi_irq)) {
dev->irq = irq_list->irq;
dev->bits = irq_list->bits;
return 0;
}
irq_list++;
}
return -EBUSY;
}
static void sonypi_display_info(void)
{
printk(KERN_INFO "sonypi: detected type%d model, "
"verbose = %d, fnkeyinit = %s, camera = %s, "
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
fnkeyinit ? "on" : "off",
camera ? "on" : "off",
compat ? "on" : "off",
mask,
useinput ? "on" : "off",
SONYPI_ACPI_ACTIVE ? "on" : "off");
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
if (minor == -1)
printk(KERN_INFO "sonypi: device allocated minor is %d\n",
sonypi_misc_device.minor);
}
static int sonypi_probe(struct platform_device *dev)
{
const struct sonypi_ioport_list *ioport_list;
const struct sonypi_irq_list *irq_list;
struct pci_dev *pcidev;
int error;
printk(KERN_WARNING "sonypi: please try the sony-laptop module instead "
"and report failures, see also "
"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
spin_lock_init(&sonypi_device.fifo_lock);
error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL);
if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
return error;
}
init_waitqueue_head(&sonypi_device.fifo_proc_list);
mutex_init(&sonypi_device.lock);
sonypi_device.bluetooth_power = -1;
if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
else
sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
if (pcidev && pci_enable_device(pcidev)) {
printk(KERN_ERR "sonypi: pci_enable_device failed\n");
error = -EIO;
goto err_put_pcidev;
}
sonypi_device.dev = pcidev;
if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
ioport_list = sonypi_type1_ioport_list;
sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
irq_list = sonypi_type1_irq_list;
} else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
ioport_list = sonypi_type2_ioport_list;
sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
irq_list = sonypi_type2_irq_list;
} else {
ioport_list = sonypi_type3_ioport_list;
sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
irq_list = sonypi_type3_irq_list;
}
error = sonypi_setup_ioports(&sonypi_device, ioport_list);
if (error) {
printk(KERN_ERR "sonypi: failed to request ioports\n");
goto err_disable_pcidev;
}
error = sonypi_setup_irq(&sonypi_device, irq_list);
if (error) {
printk(KERN_ERR "sonypi: request_irq failed\n");
goto err_free_ioports;
}
if (minor != -1)
sonypi_misc_device.minor = minor;
error = misc_register(&sonypi_misc_device);
if (error) {
printk(KERN_ERR "sonypi: misc_register failed\n");
goto err_free_irq;
}
sonypi_display_info();
if (useinput) {
error = sonypi_create_input_devices(dev);
if (error) {
printk(KERN_ERR
"sonypi: failed to create input devices\n");
goto err_miscdev_unregister;
}
spin_lock_init(&sonypi_device.input_fifo_lock);
error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
GFP_KERNEL);
if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
goto err_inpdev_unregister;
}
INIT_WORK(&sonypi_device.input_work, input_keyrelease);
}
sonypi_enable(0);
return 0;
err_inpdev_unregister:
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
err_miscdev_unregister:
misc_deregister(&sonypi_misc_device);
err_free_irq:
free_irq(sonypi_device.irq, sonypi_irq);
err_free_ioports:
release_region(sonypi_device.ioport1, sonypi_device.region_size);
err_disable_pcidev:
if (pcidev)
pci_disable_device(pcidev);
err_put_pcidev:
pci_dev_put(pcidev);
kfifo_free(&sonypi_device.fifo);
return error;
}
static int sonypi_remove(struct platform_device *dev)
{
sonypi_disable();
synchronize_irq(sonypi_device.irq);
flush_work(&sonypi_device.input_work);
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
kfifo_free(&sonypi_device.input_fifo);
}
misc_deregister(&sonypi_misc_device);
free_irq(sonypi_device.irq, sonypi_irq);
release_region(sonypi_device.ioport1, sonypi_device.region_size);
if (sonypi_device.dev) {
pci_disable_device(sonypi_device.dev);
pci_dev_put(sonypi_device.dev);
}
kfifo_free(&sonypi_device.fifo);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int old_camera_power;
static int sonypi_suspend(struct device *dev)
{
old_camera_power = sonypi_device.camera_power;
sonypi_disable();
return 0;
}
static int sonypi_resume(struct device *dev)
{
sonypi_enable(old_camera_power);
return 0;
}
static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
#define SONYPI_PM (&sonypi_pm)
#else
#define SONYPI_PM NULL
#endif
static void sonypi_shutdown(struct platform_device *dev)
{
sonypi_disable();
}
static struct platform_driver sonypi_driver = {
.driver = {
.name = "sonypi",
.pm = SONYPI_PM,
},
.probe = sonypi_probe,
.remove = sonypi_remove,
.shutdown = sonypi_shutdown,
};
static struct platform_device *sonypi_platform_device;
static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
{
.ident = "Sony Vaio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PCG-"),
},
},
{
.ident = "Sony Vaio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-"),
},
},
{ }
};
static int __init sonypi_init(void)
{
int error;
printk(KERN_INFO
"sonypi: Sony Programmable I/O Controller Driver v%s.\n",
SONYPI_DRIVER_VERSION);
if (!dmi_check_system(sonypi_dmi_table))
return -ENODEV;
error = platform_driver_register(&sonypi_driver);
if (error)
return error;
sonypi_platform_device = platform_device_alloc("sonypi", -1);
if (!sonypi_platform_device) {
error = -ENOMEM;
goto err_driver_unregister;
}
error = platform_device_add(sonypi_platform_device);
if (error)
goto err_free_device;
#ifdef CONFIG_ACPI
if (acpi_bus_register_driver(&sonypi_acpi_driver) >= 0)
acpi_driver_registered = 1;
#endif
return 0;
err_free_device:
platform_device_put(sonypi_platform_device);
err_driver_unregister:
platform_driver_unregister(&sonypi_driver);
return error;
}
static void __exit sonypi_exit(void)
{
#ifdef CONFIG_ACPI
if (acpi_driver_registered)
acpi_bus_unregister_driver(&sonypi_acpi_driver);
#endif
platform_device_unregister(sonypi_platform_device);
platform_driver_unregister(&sonypi_driver);
printk(KERN_INFO "sonypi: removed.\n");
}
module_init(sonypi_init);
module_exit(sonypi_exit);
|
linux-master
|
drivers/char/sonypi.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/drivers/char/ttyprintk.c
*
* Copyright (C) 2010 Samo Pogacnik
*/
/*
* This pseudo device allows a user to generate printk messages. It is
* possible to store "console" messages inline with kernel messages for
* better analysis of the boot process, for example.
*/
#include <linux/console.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/spinlock.h>
struct ttyprintk_port {
struct tty_port port;
spinlock_t spinlock;
};
static struct ttyprintk_port tpk_port;
/*
* Our simple preformatting supports transparent output of (time-stamped)
* printk messages (also suitable for a logging service):
* - any CR is replaced by NL
* - a ttyprintk source tag is added in front of each line
* - overly long messages are fragmented, with '\' + NL between fragments
* - TPK_STR_SIZE isn't really the write_room limiting factor, because
* it is emptied on the fly during preformatting.
*/
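/*
* For example, writing "foo\r\nbar\n" produces two printk lines,
* "[U] foo" and "[U] bar": the CR flushes the buffer (the NL that
* follows it is swallowed) and the final NL flushes again.
*/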
#define TPK_STR_SIZE 508 /* should be bigger than the max expected line length */
#define TPK_MAX_ROOM 4096 /* we could assume 4K for instance */
#define TPK_PREFIX KERN_SOH __stringify(CONFIG_TTY_PRINTK_LEVEL)
static int tpk_curr;
static char tpk_buffer[TPK_STR_SIZE + 4];
static void tpk_flush(void)
{
if (tpk_curr > 0) {
tpk_buffer[tpk_curr] = '\0';
printk(TPK_PREFIX "[U] %s\n", tpk_buffer);
tpk_curr = 0;
}
}
static int tpk_printk(const u8 *buf, int count)
{
int i;
for (i = 0; i < count; i++) {
if (tpk_curr >= TPK_STR_SIZE) {
/* end of tmp buffer reached: cut the message in two */
tpk_buffer[tpk_curr++] = '\\';
tpk_flush();
}
switch (buf[i]) {
case '\r':
tpk_flush();
if ((i + 1) < count && buf[i + 1] == '\n')
i++;
break;
case '\n':
tpk_flush();
break;
default:
tpk_buffer[tpk_curr++] = buf[i];
break;
}
}
return count;
}
/*
* TTY operations open function.
*/
static int tpk_open(struct tty_struct *tty, struct file *filp)
{
tty->driver_data = &tpk_port;
return tty_port_open(&tpk_port.port, tty, filp);
}
/*
* TTY operations close function.
*/
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
tty_port_close(&tpkp->port, tty, filp);
}
/*
* TTY operations write function.
*/
static ssize_t tpk_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;
int ret;
/* exclusive use of tpk_printk within this tty */
spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
spin_unlock_irqrestore(&tpkp->spinlock, flags);
return ret;
}
/*
* TTY operations write_room function.
*/
static unsigned int tpk_write_room(struct tty_struct *tty)
{
return TPK_MAX_ROOM;
}
/*
* TTY operations hangup function.
*/
static void tpk_hangup(struct tty_struct *tty)
{
struct ttyprintk_port *tpkp = tty->driver_data;
tty_port_hangup(&tpkp->port);
}
/*
* TTY port operations shutdown function.
*/
static void tpk_port_shutdown(struct tty_port *tport)
{
struct ttyprintk_port *tpkp =
container_of(tport, struct ttyprintk_port, port);
unsigned long flags;
spin_lock_irqsave(&tpkp->spinlock, flags);
tpk_flush();
spin_unlock_irqrestore(&tpkp->spinlock, flags);
}
static const struct tty_operations ttyprintk_ops = {
.open = tpk_open,
.close = tpk_close,
.write = tpk_write,
.write_room = tpk_write_room,
.hangup = tpk_hangup,
};
static const struct tty_port_operations tpk_port_ops = {
.shutdown = tpk_port_shutdown,
};
static struct tty_driver *ttyprintk_driver;
static struct tty_driver *ttyprintk_console_device(struct console *c,
int *index)
{
*index = 0;
return ttyprintk_driver;
}
static struct console ttyprintk_console = {
.name = "ttyprintk",
.device = ttyprintk_console_device,
};
static int __init ttyprintk_init(void)
{
int ret;
spin_lock_init(&tpk_port.spinlock);
ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_UNNUMBERED_NODE);
if (IS_ERR(ttyprintk_driver))
return PTR_ERR(ttyprintk_driver);
tty_port_init(&tpk_port.port);
tpk_port.port.ops = &tpk_port_ops;
ttyprintk_driver->driver_name = "ttyprintk";
ttyprintk_driver->name = "ttyprintk";
ttyprintk_driver->major = TTYAUX_MAJOR;
ttyprintk_driver->minor_start = 3;
ttyprintk_driver->type = TTY_DRIVER_TYPE_CONSOLE;
ttyprintk_driver->init_termios = tty_std_termios;
ttyprintk_driver->init_termios.c_oflag = OPOST | OCRNL | ONOCR | ONLRET;
tty_set_operations(ttyprintk_driver, &ttyprintk_ops);
tty_port_link_device(&tpk_port.port, ttyprintk_driver, 0);
ret = tty_register_driver(ttyprintk_driver);
if (ret < 0) {
printk(KERN_ERR "Couldn't register ttyprintk driver\n");
goto error;
}
register_console(&ttyprintk_console);
return 0;
error:
tty_driver_kref_put(ttyprintk_driver);
tty_port_destroy(&tpk_port.port);
return ret;
}
static void __exit ttyprintk_exit(void)
{
unregister_console(&ttyprintk_console);
tty_unregister_driver(ttyprintk_driver);
tty_driver_kref_put(ttyprintk_driver);
tty_port_destroy(&tpk_port.port);
}
device_initcall(ttyprintk_init);
module_exit(ttyprintk_exit);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/char/ttyprintk.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* hangcheck-timer.c
*
* Driver for a little io fencing timer.
*
* Copyright (C) 2002, 2003 Oracle. All rights reserved.
*
* Author: Joel Becker <[email protected]>
*/
/*
* The hangcheck-timer driver uses the TSC to catch delays that
* jiffies does not notice. A timer is set. When the timer fires, it
* checks whether it was delayed and if that delay exceeds a given
* margin of error. The hangcheck_tick module parameter takes the timer
* duration in seconds. The hangcheck_margin parameter defines the
* margin of error, in seconds. The defaults are 180 seconds for the
* timer and 60 seconds for the margin of error. IOW, a timer is set
* for 180 seconds. When the timer fires, the callback checks the
* actual duration that the timer waited. If the duration exceeds the
* allotted time and margin (here 180 + 60, or 240 seconds), the machine
* is restarted. A healthy machine will have the duration match the
* expected timeout very closely.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#define VERSION_STR "0.9.1"
#define DEFAULT_IOFENCE_MARGIN 60 /* Default fudge factor, in seconds */
#define DEFAULT_IOFENCE_TICK 180 /* Default timer timeout, in seconds */
static int hangcheck_tick = DEFAULT_IOFENCE_TICK;
static int hangcheck_margin = DEFAULT_IOFENCE_MARGIN;
static int hangcheck_reboot; /* Defaults to not reboot */
static int hangcheck_dump_tasks; /* Defaults to not dumping SysRQ T */
/* options - modular */
module_param(hangcheck_tick, int, 0);
MODULE_PARM_DESC(hangcheck_tick, "Timer delay.");
module_param(hangcheck_margin, int, 0);
MODULE_PARM_DESC(hangcheck_margin, "If the hangcheck timer has been delayed more than hangcheck_margin seconds, the driver will fire.");
module_param(hangcheck_reboot, int, 0);
MODULE_PARM_DESC(hangcheck_reboot, "If nonzero, the machine will reboot when the timer margin is exceeded.");
module_param(hangcheck_dump_tasks, int, 0);
MODULE_PARM_DESC(hangcheck_dump_tasks, "If nonzero, the machine will dump the system task state when the timer margin is exceeded.");
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("Hangcheck-timer detects when the system has gone out to lunch past a certain margin.");
MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION_STR);
/* options - nonmodular */
#ifndef MODULE
static int __init hangcheck_parse_tick(char *str)
{
int par;
if (get_option(&str,&par))
hangcheck_tick = par;
return 1;
}
static int __init hangcheck_parse_margin(char *str)
{
int par;
if (get_option(&str,&par))
hangcheck_margin = par;
return 1;
}
static int __init hangcheck_parse_reboot(char *str)
{
int par;
if (get_option(&str,&par))
hangcheck_reboot = par;
return 1;
}
static int __init hangcheck_parse_dump_tasks(char *str)
{
int par;
if (get_option(&str,&par))
hangcheck_dump_tasks = par;
return 1;
}
__setup("hcheck_tick", hangcheck_parse_tick);
__setup("hcheck_margin", hangcheck_parse_margin);
__setup("hcheck_reboot", hangcheck_parse_reboot);
__setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
#endif /* not MODULE */
#define TIMER_FREQ 1000000000ULL
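/*
* ktime_get_ns() counts nanoseconds, so TIMER_FREQ converts the
* second-based module parameters: hangcheck_tsc_margin ends up as
* (hangcheck_margin + hangcheck_tick) * TIMER_FREQ, i.e. 240 seconds
* worth of nanoseconds with the default values.
*/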
/* Last time scheduled */
static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
static void hangcheck_fire(struct timer_list *);
static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
static void hangcheck_fire(struct timer_list *unused)
{
unsigned long long cur_tsc, tsc_diff;
cur_tsc = ktime_get_ns();
if (cur_tsc > hangcheck_tsc)
tsc_diff = cur_tsc - hangcheck_tsc;
else
tsc_diff = (cur_tsc + (~0ULL - hangcheck_tsc)); /* timestamp wrapped around */
if (tsc_diff > hangcheck_tsc_margin) {
if (hangcheck_dump_tasks) {
printk(KERN_CRIT "Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
emergency_restart();
} else {
printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
}
}
#if 0
/*
* Enable to investigate delays in detail
*/
printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
hangcheck_tsc = ktime_get_ns();
}
static int __init hangcheck_init(void)
{
printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
VERSION_STR, hangcheck_tick, hangcheck_margin);
hangcheck_tsc_margin =
(unsigned long long)hangcheck_margin + hangcheck_tick;
hangcheck_tsc_margin *= TIMER_FREQ;
hangcheck_tsc = ktime_get_ns();
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
return 0;
}
static void __exit hangcheck_exit(void)
{
del_timer_sync(&hangcheck_ticktock);
printk("Hangcheck: Stopped hangcheck timer.\n");
}
module_init(hangcheck_init);
module_exit(hangcheck_exit);
|
linux-master
|
drivers/char/hangcheck-timer.c
|
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/char/nsc_gpio.c
National Semiconductor common GPIO device-file/VFS methods.
Allows a user space process to control the GPIO pins.
Copyright (c) 2001,2002 Christer Weinigel <[email protected]>
Copyright (c) 2005 Jim Cromie <[email protected]>
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#define NAME "nsc_gpio"
void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
{
/* retrieve current config w/o changing it */
u32 config = amp->gpio_config(index, ~0, 0);
/* user requested via 'v' command, so its INFO */
dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
index, config,
(config & 1) ? "OE" : "TS", /* output-enabled/tristate */
(config & 2) ? "PP" : "OD", /* push pull / open drain */
(config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
(config & 8) ? "LOCKED" : "", /* locked / unlocked */
(config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
(config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
(config & 64) ? "DEBOUNCE" : "", /* debounce */
amp->gpio_get(index), amp->gpio_current(index));
}
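/*
* Each character written is a command for the GPIO pin selected by the
* device minor: '0'/'1' drive the pin low/high, 'O'/'o' enable/disable
* the output, 'T'/'t' select push-pull/open-drain, 'P'/'p' control the
* pull-up, 'v' dumps the pin configuration and '\n' is ignored; any
* other character makes the whole write fail with -EINVAL.
*/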
ssize_t nsc_gpio_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
unsigned m = iminor(file_inode(file));
struct nsc_gpio_ops *amp = file->private_data;
struct device *dev = amp->dev;
size_t i;
int err = 0;
for (i = 0; i < len; ++i) {
char c;
if (get_user(c, data + i))
return -EFAULT;
switch (c) {
case '0':
amp->gpio_set(m, 0);
break;
case '1':
amp->gpio_set(m, 1);
break;
case 'O':
dev_dbg(dev, "GPIO%d output enabled\n", m);
amp->gpio_config(m, ~1, 1);
break;
case 'o':
dev_dbg(dev, "GPIO%d output disabled\n", m);
amp->gpio_config(m, ~1, 0);
break;
case 'T':
dev_dbg(dev, "GPIO%d output is push pull\n", m);
amp->gpio_config(m, ~2, 2);
break;
case 't':
dev_dbg(dev, "GPIO%d output is open drain\n", m);
amp->gpio_config(m, ~2, 0);
break;
case 'P':
dev_dbg(dev, "GPIO%d pull up enabled\n", m);
amp->gpio_config(m, ~4, 4);
break;
case 'p':
dev_dbg(dev, "GPIO%d pull up disabled\n", m);
amp->gpio_config(m, ~4, 0);
break;
case 'v':
/* View Current pin settings */
amp->gpio_dump(amp, m);
break;
case '\n':
/* end of settings string, do nothing */
break;
default:
dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
m, (int)c);
err++;
}
}
if (err)
return -EINVAL; /* full string handled, report error */
return len;
}
ssize_t nsc_gpio_read(struct file *file, char __user * buf,
size_t len, loff_t * ppos)
{
unsigned m = iminor(file_inode(file));
int value;
struct nsc_gpio_ops *amp = file->private_data;
value = amp->gpio_get(m);
if (put_user(value ? '1' : '0', buf))
return -EFAULT;
return 1;
}
/* common file-ops routines for both scx200_gpio and pc87360_gpio */
EXPORT_SYMBOL(nsc_gpio_write);
EXPORT_SYMBOL(nsc_gpio_read);
EXPORT_SYMBOL(nsc_gpio_dump);
static int __init nsc_gpio_init(void)
{
printk(KERN_DEBUG NAME " initializing\n");
return 0;
}
static void __exit nsc_gpio_cleanup(void)
{
printk(KERN_DEBUG NAME " cleanup\n");
}
module_init(nsc_gpio_init);
module_exit(nsc_gpio_cleanup);
MODULE_AUTHOR("Jim Cromie <[email protected]>");
MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/char/nsc_gpio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel & MS High Precision Event Timer Implementation.
*
* Copyright (C) 2003 Intel Corporation
* Venki Pallipadi
* (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
* Bob Picco <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
* See HPET spec revision 1.
*/
#define HPET_USER_FREQ (64)
#define HPET_DRIFT (500)
#define HPET_RANGE_SIZE 1024 /* from HPET spec */
/* WARNING -- don't get confused. These macros are never used
* to write the (single) counter, and rarely to read it.
* They're badly named; to be fixed someday.
*/
#if BITS_PER_LONG == 64
#define write_counter(V, MC) writeq(V, MC)
#define read_counter(MC) readq(MC)
#else
#define write_counter(V, MC) writel(V, MC)
#define read_counter(MC) readl(MC)
#endif
static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
static u64 read_hpet(struct clocksource *cs)
{
return (u64)read_counter((void __iomem *)hpet_mctr);
}
static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
.read = read_hpet,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
#define HPET_DEV_NAME (7)
struct hpet_dev {
struct hpets *hd_hpets;
struct hpet __iomem *hd_hpet;
struct hpet_timer __iomem *hd_timer;
unsigned long hd_ireqfreq;
unsigned long hd_irqdata;
wait_queue_head_t hd_waitqueue;
struct fasync_struct *hd_async_queue;
unsigned int hd_flags;
unsigned int hd_irq;
unsigned int hd_hdwirq;
char hd_name[HPET_DEV_NAME];
};
struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
unsigned int hp_which;
struct hpet_dev hp_dev[];
};
static struct hpets *hpets;
#define HPET_OPEN 0x0001
#define HPET_IE 0x0002 /* interrupt enabled */
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
static irqreturn_t hpet_interrupt(int irq, void *data)
{
struct hpet_dev *devp;
unsigned long isr;
devp = data;
isr = 1 << (devp - devp->hd_hpets->hp_dev);
if ((devp->hd_flags & HPET_SHARED_IRQ) &&
!(isr & readl(&devp->hd_hpet->hpet_isr)))
return IRQ_NONE;
spin_lock(&hpet_lock);
devp->hd_irqdata++;
/*
* For non-periodic timers, advance the comparator.
* This has the effect of treating non-periodic like periodic.
*/
if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
unsigned long t, mc, base, k;
struct hpet __iomem *hpet = devp->hd_hpet;
struct hpets *hpetp = devp->hd_hpets;
t = devp->hd_ireqfreq;
read_counter(&devp->hd_timer->hpet_compare);
mc = read_counter(&hpet->hpet_mc);
/* The time for the next interrupt would logically be t + mc,
* however, if we are very unlucky and the interrupt is delayed
* for longer than t then we will completely miss the next
* interrupt if we set t + mc and an application will hang.
* Therefore we need to make a more complex computation assuming
* that there exists a k for which the following is true:
* k * t + base < mc + delta
* (k + 1) * t + base > mc + delta
* where t is the interval in hpet ticks for the given freq,
* base is the theoretical start value 0 < base < t,
* mc is the main counter value at the time of the interrupt,
* delta is the time it takes to write a value to the
* comparator.
* k may then be computed as (mc - base + delta) / t .
*/
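/*
* Worked example with illustrative numbers (not hardware values):
* t = 1000 ticks, mc = 5300, delta = 50. Then base = 5300 % 1000 = 300,
* k = (5300 - 300 + 50) / 1000 = 5, and the comparator is set to
* t * (k + 1) + base = 6300, the first value of the form k * t + base
* strictly after mc + delta = 5350.
*/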
base = mc % t;
k = (mc - base + hpetp->hp_delta) / t;
write_counter(t * (k + 1) + base,
&devp->hd_timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ)
writel(isr, &devp->hd_hpet->hpet_isr);
spin_unlock(&hpet_lock);
wake_up_interruptible(&devp->hd_waitqueue);
kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);
return IRQ_HANDLED;
}
static void hpet_timer_set_irq(struct hpet_dev *devp)
{
unsigned long v;
int irq, gsi;
struct hpet_timer __iomem *timer;
spin_lock_irq(&hpet_lock);
if (devp->hd_hdwirq) {
spin_unlock_irq(&hpet_lock);
return;
}
timer = devp->hd_timer;
/* we prefer level triggered mode */
v = readl(&timer->hpet_config);
if (!(v & Tn_INT_TYPE_CNF_MASK)) {
v |= Tn_INT_TYPE_CNF_MASK;
writel(v, &timer->hpet_config);
}
spin_unlock_irq(&hpet_lock);
v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
Tn_INT_ROUTE_CAP_SHIFT;
/*
* In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which are always used by
* legacy devices. In IO APIC mode, we skip all the legacy IRQs.
*/
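/* 0xf3df covers IRQs 0-4, 6-9 and 12-15, leaving 5, 10 and 11 selectable. */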
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
v &= ~0xf3df;
else
v &= ~0xffff;
for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
if (irq >= nr_irqs) {
irq = HPET_MAX_IRQ;
break;
}
gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
ACPI_ACTIVE_LOW);
if (gsi > 0)
break;
/* FIXME: Setup interrupt source table */
}
if (irq < HPET_MAX_IRQ) {
spin_lock_irq(&hpet_lock);
v = readl(&timer->hpet_config);
v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
writel(v, &timer->hpet_config);
devp->hd_hdwirq = gsi;
spin_unlock_irq(&hpet_lock);
}
return;
}
static int hpet_open(struct inode *inode, struct file *file)
{
struct hpet_dev *devp;
struct hpets *hpetp;
int i;
if (file->f_mode & FMODE_WRITE)
return -EINVAL;
mutex_lock(&hpet_mutex);
spin_lock_irq(&hpet_lock);
for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
for (i = 0; i < hpetp->hp_ntimer; i++)
if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
continue;
} else {
devp = &hpetp->hp_dev[i];
break;
}
if (!devp) {
spin_unlock_irq(&hpet_lock);
mutex_unlock(&hpet_mutex);
return -EBUSY;
}
file->private_data = devp;
devp->hd_irqdata = 0;
devp->hd_flags |= HPET_OPEN;
spin_unlock_irq(&hpet_lock);
mutex_unlock(&hpet_mutex);
hpet_timer_set_irq(devp);
return 0;
}
static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long data;
ssize_t retval;
struct hpet_dev *devp;
devp = file->private_data;
if (!devp->hd_ireqfreq)
return -EIO;
if (count < sizeof(unsigned long))
return -EINVAL;
add_wait_queue(&devp->hd_waitqueue, &wait);
for ( ; ; ) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&hpet_lock);
data = devp->hd_irqdata;
devp->hd_irqdata = 0;
spin_unlock_irq(&hpet_lock);
if (data) {
break;
} else if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto out;
} else if (signal_pending(current)) {
retval = -ERESTARTSYS;
goto out;
}
schedule();
}
retval = put_user(data, (unsigned long __user *)buf);
if (!retval)
retval = sizeof(unsigned long);
out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&devp->hd_waitqueue, &wait);
return retval;
}
static __poll_t hpet_poll(struct file *file, poll_table * wait)
{
unsigned long v;
struct hpet_dev *devp;
devp = file->private_data;
if (!devp->hd_ireqfreq)
return 0;
poll_wait(file, &devp->hd_waitqueue, wait);
spin_lock_irq(&hpet_lock);
v = devp->hd_irqdata;
spin_unlock_irq(&hpet_lock);
if (v != 0)
return EPOLLIN | EPOLLRDNORM;
return 0;
}
#ifdef CONFIG_HPET_MMAP
#ifdef CONFIG_HPET_MMAP_DEFAULT
static int hpet_mmap_enabled = 1;
#else
static int hpet_mmap_enabled = 0;
#endif
static __init int hpet_mmap_enable(char *str)
{
get_option(&str, &hpet_mmap_enabled);
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
__setup("hpet_mmap=", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
struct hpet_dev *devp;
unsigned long addr;
if (!hpet_mmap_enabled)
return -EACCES;
devp = file->private_data;
addr = devp->hd_hpets->hp_hpet_phys;
if (addr & (PAGE_SIZE - 1))
return -ENOSYS;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return vm_iomap_memory(vma, addr, PAGE_SIZE);
}
#else
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
return -ENOSYS;
}
#endif
static int hpet_fasync(int fd, struct file *file, int on)
{
struct hpet_dev *devp;
devp = file->private_data;
if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
return 0;
else
return -EIO;
}
static int hpet_release(struct inode *inode, struct file *file)
{
struct hpet_dev *devp;
struct hpet_timer __iomem *timer;
int irq = 0;
devp = file->private_data;
timer = devp->hd_timer;
spin_lock_irq(&hpet_lock);
writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
&timer->hpet_config);
irq = devp->hd_irq;
devp->hd_irq = 0;
devp->hd_ireqfreq = 0;
if (devp->hd_flags & HPET_PERIODIC
&& readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
unsigned long v;
v = readq(&timer->hpet_config);
v ^= Tn_TYPE_CNF_MASK;
writeq(v, &timer->hpet_config);
}
devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
spin_unlock_irq(&hpet_lock);
if (irq)
free_irq(irq, devp);
file->private_data = NULL;
return 0;
}
static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
struct hpet_timer __iomem *timer;
struct hpet __iomem *hpet;
struct hpets *hpetp;
int irq;
unsigned long g, v, t, m;
unsigned long flags, isr;
timer = devp->hd_timer;
hpet = devp->hd_hpet;
hpetp = devp->hd_hpets;
if (!devp->hd_ireqfreq)
return -EIO;
spin_lock_irq(&hpet_lock);
if (devp->hd_flags & HPET_IE) {
spin_unlock_irq(&hpet_lock);
return -EBUSY;
}
devp->hd_flags |= HPET_IE;
if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
devp->hd_flags |= HPET_SHARED_IRQ;
spin_unlock_irq(&hpet_lock);
irq = devp->hd_hdwirq;
if (irq) {
unsigned long irq_flags;
if (devp->hd_flags & HPET_SHARED_IRQ) {
/*
* To prevent the interrupt handler from seeing an
* unwanted interrupt status bit, program the timer
* so that it will not fire in the near future ...
*/
writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
&timer->hpet_config);
write_counter(read_counter(&hpet->hpet_mc),
&timer->hpet_compare);
/* ... and clear any left-over status. */
isr = 1 << (devp - devp->hd_hpets->hp_dev);
writel(isr, &hpet->hpet_isr);
}
sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
if (request_irq(irq, hpet_interrupt, irq_flags,
devp->hd_name, (void *)devp)) {
printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
irq = 0;
}
}
if (irq == 0) {
spin_lock_irq(&hpet_lock);
devp->hd_flags ^= HPET_IE;
spin_unlock_irq(&hpet_lock);
return -EIO;
}
devp->hd_irq = irq;
t = devp->hd_ireqfreq;
v = readq(&timer->hpet_config);
/* 64-bit comparators are not yet supported through the ioctls,
* so force this into 32-bit mode if it supports both modes
*/
g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;
if (devp->hd_flags & HPET_PERIODIC) {
g |= Tn_TYPE_CNF_MASK;
v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
writeq(v, &timer->hpet_config);
local_irq_save(flags);
/*
* NOTE: First we modify the hidden accumulator
* register supported by periodic-capable comparators.
* We never want to modify the (single) counter; that
* would affect all the comparators. The value written
* is the counter value when the first interrupt is due.
*/
m = read_counter(&hpet->hpet_mc);
write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
/*
* Then we modify the comparator, indicating the period
* for subsequent interrupt.
*/
write_counter(t, &timer->hpet_compare);
} else {
local_irq_save(flags);
m = read_counter(&hpet->hpet_mc);
write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
}
if (devp->hd_flags & HPET_SHARED_IRQ) {
isr = 1 << (devp - devp->hd_hpets->hp_dev);
writel(isr, &hpet->hpet_isr);
}
writeq(g, &timer->hpet_config);
local_irq_restore(flags);
return 0;
}
/* converts Hz to number of timer ticks */
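/*
* The (dis >> 1) term rounds to nearest: e.g. with a typical
* 14.31818 MHz HPET (hp_tick_freq = 14318180) and dis = 64 Hz this
* returns (14318180 + 32) / 64 = 223722 ticks between interrupts.
*/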
static inline unsigned long hpet_time_div(struct hpets *hpets,
unsigned long dis)
{
unsigned long long m;
m = hpets->hp_tick_freq + (dis >> 1);
return div64_ul(m, dis);
}
static int
hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
struct hpets *hpetp;
int err;
unsigned long v;
switch (cmd) {
case HPET_IE_OFF:
case HPET_INFO:
case HPET_EPI:
case HPET_DPI:
case HPET_IRQFREQ:
timer = devp->hd_timer;
hpetp = devp->hd_hpets;
break;
case HPET_IE_ON:
return hpet_ioctl_ieon(devp);
default:
return -EINVAL;
}
err = 0;
switch (cmd) {
case HPET_IE_OFF:
if ((devp->hd_flags & HPET_IE) == 0)
break;
v = readq(&timer->hpet_config);
v &= ~Tn_INT_ENB_CNF_MASK;
writeq(v, &timer->hpet_config);
if (devp->hd_irq) {
free_irq(devp->hd_irq, devp);
devp->hd_irq = 0;
}
devp->hd_flags ^= HPET_IE;
break;
case HPET_INFO:
{
memset(info, 0, sizeof(*info));
if (devp->hd_ireqfreq)
info->hi_ireqfreq =
hpet_time_div(hpetp, devp->hd_ireqfreq);
info->hi_flags =
readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
info->hi_hpet = hpetp->hp_which;
info->hi_timer = devp - hpetp->hp_dev;
break;
}
case HPET_EPI:
v = readq(&timer->hpet_config);
if ((v & Tn_PER_INT_CAP_MASK) == 0) {
err = -ENXIO;
break;
}
devp->hd_flags |= HPET_PERIODIC;
break;
case HPET_DPI:
v = readq(&timer->hpet_config);
if ((v & Tn_PER_INT_CAP_MASK) == 0) {
err = -ENXIO;
break;
}
if (devp->hd_flags & HPET_PERIODIC &&
readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
v = readq(&timer->hpet_config);
v ^= Tn_TYPE_CNF_MASK;
writeq(v, &timer->hpet_config);
}
devp->hd_flags &= ~HPET_PERIODIC;
break;
case HPET_IRQFREQ:
if ((arg > hpet_max_freq) &&
!capable(CAP_SYS_RESOURCE)) {
err = -EACCES;
break;
}
if (!arg) {
err = -EINVAL;
break;
}
devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
}
return err;
}
static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hpet_info info;
int err;
mutex_lock(&hpet_mutex);
err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
mutex_unlock(&hpet_mutex);
if ((cmd == HPET_INFO) && !err &&
(copy_to_user((void __user *)arg, &info, sizeof(info))))
err = -EFAULT;
return err;
}
#ifdef CONFIG_COMPAT
struct compat_hpet_info {
compat_ulong_t hi_ireqfreq; /* Hz */
compat_ulong_t hi_flags; /* information */
unsigned short hi_hpet;
unsigned short hi_timer;
};
static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hpet_info info;
int err;
mutex_lock(&hpet_mutex);
err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
mutex_unlock(&hpet_mutex);
if ((cmd == HPET_INFO) && !err) {
struct compat_hpet_info __user *u = compat_ptr(arg);
if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
put_user(info.hi_flags, &u->hi_flags) ||
put_user(info.hi_hpet, &u->hi_hpet) ||
put_user(info.hi_timer, &u->hi_timer))
err = -EFAULT;
}
return err;
}
#endif
static const struct file_operations hpet_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = hpet_read,
.poll = hpet_poll,
.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = hpet_compat_ioctl,
#endif
.open = hpet_open,
.release = hpet_release,
.fasync = hpet_fasync,
.mmap = hpet_mmap,
};
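/* Return 1 if an HPET at this physical address has already been registered. */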
static int hpet_is_known(struct hpet_data *hdp)
{
struct hpets *hpetp;
for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
return 1;
return 0;
}
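/*
 * /proc/sys/dev/hpet/max-user-freq: ceiling on the interrupt frequency an
 * unprivileged task may request via HPET_IRQFREQ (see hpet_ioctl_common).
 */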
static struct ctl_table hpet_table[] = {
{
.procname = "max-user-freq",
.data = &hpet_max_freq,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{}
};
static struct ctl_table_header *sysctl_header;
/*
* Adjustment for when arming the timer with
* initial conditions. That is, main counter
* ticks expired before interrupts are enabled.
*/
#define TICK_CALIBRATE (1000UL)
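/*
 * Measure that adjustment: borrow an unopened comparator and, for about
 * 1/TICK_CALIBRATE of a second of main-counter time, repeatedly perform
 * the read-counter/write-compare sequence used when arming a timer.  The
 * average number of ticks consumed per iteration becomes hp_delta.
 */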
static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
struct hpet_timer __iomem *timer = NULL;
unsigned long t, m, count, i, flags, start;
struct hpet_dev *devp;
int j;
struct hpet __iomem *hpet;
for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
if ((devp->hd_flags & HPET_OPEN) == 0) {
timer = devp->hd_timer;
break;
}
if (!timer)
return 0;
hpet = hpetp->hp_hpet;
t = read_counter(&timer->hpet_compare);
i = 0;
count = hpet_time_div(hpetp, TICK_CALIBRATE);
local_irq_save(flags);
start = read_counter(&hpet->hpet_mc);
do {
m = read_counter(&hpet->hpet_mc);
write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
} while (i++, (m - start) < count);
local_irq_restore(flags);
return (m - start) / i;
}
static unsigned long hpet_calibrate(struct hpets *hpetp)
{
unsigned long ret = ~0UL;
unsigned long tmp;
/*
* Try to calibrate until return value becomes stable small value.
* If SMI interruption occurs in calibration loop, the return value
* will be big. This avoids its impact.
*/
for ( ; ; ) {
tmp = __hpet_calibrate(hpetp);
if (ret <= tmp)
break;
ret = tmp;
}
return ret;
}
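/*
 * hpet_alloc - register one HPET block described by *hdp.  Verifies that
 * the number of IRQs matches the advertised comparator count, computes the
 * tick frequency from the capabilities register, enables the main counter
 * if firmware left it stopped, sets up the per-comparator hpet_dev entries
 * (marking timers reserved by platform code as already open) and
 * calibrates hp_delta.
 */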
int hpet_alloc(struct hpet_data *hdp)
{
u64 cap, mcfg;
struct hpet_dev *devp;
u32 i, ntimer;
struct hpets *hpetp;
struct hpet __iomem *hpet;
static struct hpets *last;
unsigned long period;
unsigned long long temp;
u32 remainder;
/*
* hpet_alloc can be called by platform dependent code.
* If platform dependent code has allocated the hpet that
* ACPI has also reported, then we catch it here.
*/
if (hpet_is_known(hdp)) {
printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
__func__);
return 0;
}
hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
GFP_KERNEL);
if (!hpetp)
return -ENOMEM;
hpetp->hp_which = hpet_nhpet++;
hpetp->hp_hpet = hdp->hd_address;
hpetp->hp_hpet_phys = hdp->hd_phys_address;
hpetp->hp_ntimer = hdp->hd_nirqs;
for (i = 0; i < hdp->hd_nirqs; i++)
hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];
hpet = hpetp->hp_hpet;
cap = readq(&hpet->hpet_cap);
ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;
if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of irqs doesn't agree with number of timers\n");
kfree(hpetp);
return -ENODEV;
}
if (last)
last->hp_next = hpetp;
else
hpets = hpetp;
last = hpetp;
period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
temp += period >> 1; /* round */
do_div(temp, period);
hpetp->hp_tick_freq = temp; /* ticks per second */
printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
hpetp->hp_ntimer > 1 ? "s" : "");
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
temp = hpetp->hp_tick_freq;
remainder = do_div(temp, 1000000);
printk(KERN_INFO
"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
hpetp->hp_which, hpetp->hp_ntimer,
cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
(unsigned) temp, remainder);
mcfg = readq(&hpet->hpet_config);
if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
write_counter(0L, &hpet->hpet_mc);
mcfg |= HPET_ENABLE_CNF_MASK;
writeq(mcfg, &hpet->hpet_config);
}
for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
struct hpet_timer __iomem *timer;
timer = &hpet->hpet_timers[devp - hpetp->hp_dev];
devp->hd_hpets = hpetp;
devp->hd_hpet = hpet;
devp->hd_timer = timer;
/*
* If the timer was reserved by platform code,
* then make timer unavailable for opens.
*/
if (hdp->hd_state & (1 << i)) {
devp->hd_flags = HPET_OPEN;
continue;
}
init_waitqueue_head(&devp->hd_waitqueue);
}
hpetp->hp_delta = hpet_calibrate(hpetp);
/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
if (!hpet_clocksource) {
hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
hpetp->hp_clocksource = &clocksource_hpet;
hpet_clocksource = &clocksource_hpet;
}
#endif
return 0;
}
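/*
 * acpi_walk_resources() callback: record the HPET MMIO window (from either
 * an Address64 or a FixedMemory32 descriptor) and register the extended
 * IRQs listed in _CRS, up to HPET_MAX_TIMERS of them.
 */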
static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
struct hpet_data *hdp;
acpi_status status;
struct acpi_resource_address64 addr;
hdp = data;
status = acpi_resource_to_address64(res, &addr);
if (ACPI_SUCCESS(status)) {
hdp->hd_phys_address = addr.address.minimum;
hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
if (!hdp->hd_address)
return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
return AE_ALREADY_EXISTS;
}
} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
struct acpi_resource_fixed_memory32 *fixmem32;
fixmem32 = &res->data.fixed_memory32;
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
if (!hdp->hd_address)
return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
return AE_ALREADY_EXISTS;
}
} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
struct acpi_resource_extended_irq *irqp;
int i, irq;
irqp = &res->data.extended_irq;
for (i = 0; i < irqp->interrupt_count; i++) {
if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
break;
irq = acpi_register_gsi(NULL, irqp->interrupts[i],
irqp->triggering,
irqp->polarity);
if (irq < 0)
return AE_ERROR;
hdp->hd_irq[hdp->hd_nirqs] = irq;
hdp->hd_nirqs++;
}
}
return AE_OK;
}
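/* Bind to an ACPI-enumerated HPET (PNP0103) and hand it to hpet_alloc(). */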
static int hpet_acpi_add(struct acpi_device *device)
{
acpi_status result;
struct hpet_data data;
memset(&data, 0, sizeof(data));
result =
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
hpet_resources, &data);
if (ACPI_FAILURE(result))
return -ENODEV;
if (!data.hd_address || !data.hd_nirqs) {
if (data.hd_address)
iounmap(data.hd_address);
printk("%s: no address or irqs in _CRS\n", __func__);
return -ENODEV;
}
return hpet_alloc(&data);
}
static const struct acpi_device_id hpet_device_ids[] = {
{"PNP0103", 0},
{"", 0},
};
static struct acpi_driver hpet_acpi_driver = {
.name = "hpet",
.ids = hpet_device_ids,
.ops = {
.add = hpet_acpi_add,
},
};
static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
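/*
 * Register the /dev/hpet misc device, the dev/hpet sysctl directory and
 * the ACPI driver; unwind the first two if ACPI registration fails.
 */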
static int __init hpet_init(void)
{
int result;
result = misc_register(&hpet_misc);
if (result < 0)
return -ENODEV;
sysctl_header = register_sysctl("dev/hpet", hpet_table);
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
if (sysctl_header)
unregister_sysctl_table(sysctl_header);
misc_deregister(&hpet_misc);
return result;
}
return 0;
}
device_initcall(hpet_init);
/*
MODULE_AUTHOR("Bob Picco <[email protected]>");
MODULE_LICENSE("GPL");
*/
| linux-master | drivers/char/hpet.c |